// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

const char * const be_misconfig_evt_port_state[] = {
	"Physical Link is functional",
	"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
	"Optics of two types installed – Remove one optic or install matching pair of optics.",
	"Incompatible optics – Replace with compatible optics for card to function.",
	"Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics – Replace with Avago-certified optics to enable link operation."
};

static char *be_port_misconfig_evt_severity[] = {
	"KERN_WARN",
	"KERN_INFO",
	"KERN_ERR",
	"KERN_WARN"
};

static char *phy_state_oper_desc[] = {
	"Link is non-operational",
	"Link is operational",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_HOST_DDR_DMA,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_LOOPBACK_TEST,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_HSW_CONFIG,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_DEVCFG | BE_PRIV_VHADM |
		BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_DEVCFG
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

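/* Ring the MCC doorbell: the queue id sits in the low bits of the doorbell
 * word and the count of newly posted WRBs (here always 1) goes above
 * DB_MCCQ_NUM_POSTED_SHIFT. The wmb() makes the WRB writes visible in
 * memory before the doorbell write hands them to the controller.
 */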
static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

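/* Rebuild the virtual address of the response buffer that fill_wrb_tags()
 * split across the WRB's tag0/tag1 words. The ((addr << 16) << 16) form
 * avoids an undefined 32-bit shift of unsigned long on 32-bit builds,
 * where tag1 is zero anyway.
 */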
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

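/* Completion statuses that are expected in normal operation (e.g. when
 * probing a feature the FW or card does not support) and therefore should
 * not be logged as driver errors.
 */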
static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

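/* Process a single MCC completion: swap it to host endian, dispatch any
 * async-command handling, and log unexpected failures. The raw
 * compl->status returned here packs both base and additional status;
 * callers pull them apart with the base_status()/addl_status() helpers.
 */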
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
	u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
	u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
	struct device *dev = &adapter->pdev->dev;
	u8 msg_severity = DEFAULT_MSG_SEVERITY;
	u8 phy_state_info;
	u8 new_phy_state;

	new_phy_state =
		(sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;

	if (new_phy_state == adapter->phy_state)
		return;

	adapter->phy_state = new_phy_state;

	/* for older fw that doesn't populate link effect data */
	if (!sfp_misconfig_evt_word2)
		goto log_message;

	phy_state_info =
		(sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;

	if (phy_state_info & PHY_STATE_INFO_VALID) {
		msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;

		if (be_phy_unqualified(new_phy_state))
			phy_oper_state = (phy_state_info & PHY_STATE_OPER);
	}

log_message:
	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	if (be_phy_state_unknown(new_phy_state))
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: Unrecognized Optics state: 0x%x. %s",
			   adapter->port_name,
			   new_phy_state,
			   phy_state_oper_desc[phy_oper_state]);
	else
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: %s %s",
			   adapter->port_name,
			   be_misconfig_evt_port_state[new_phy_state],
			   phy_state_oper_desc[phy_oper_state]);

	/* Log Vendor name and part no. if a misconfigured SFP is detected */
	if (be_phy_misconfigured(new_phy_state))
		adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio_bits =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

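/* Fetch the next unconsumed completion from the MCC CQ, if any. The valid
 * bit checked by be_mcc_compl_is_new() is what distinguishes fresh FW
 * entries from old ones; be_mcc_compl_use() clears it so an entry is never
 * processed twice once the ring wraps.
 */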
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
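/* Polls be_process_mcc() up to 12000 times, sleeping 0.5-1 ms per pass,
 * so the 12s figure below is the worst case (roughly 6s at the low end)
 */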
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		12000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		usleep_range(500, 1000);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u32 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
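/* The 64-bit mailbox DMA address does not fit into the single 30-bit
 * doorbell payload, so it is delivered in two writes: first the high part
 * (with MPU_MAILBOX_DB_HI_MASK set), then the low part. Since address
 * bits 0-3 are never transmitted, the mailbox must be 16-byte aligned.
 */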
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

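/* Stash the virtual address of the request/response buffer in the WRB's
 * two 32-bit tags; be_decode_resp_hdr() reassembles it when the
 * corresponding completion arrives.
 */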
static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

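/* Convert a DMA buffer into the list of 4K page addresses the FW expects;
 * e.g. a 16K ring buffer yields four entries. At most max_pages entries
 * are written, one per 4K page spanned.
 */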
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		mutex_lock(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		return mutex_unlock(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}
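
/* A typical embedded-cmd caller (see be_cmd_txq_create() below) builds the
 * WRB on its stack and lets be_cmd_notify_wait() route it to the MCCQ or
 * the mailbox, roughly:
 *
 *	struct be_mcc_wrb wrb = {0};
 *
 *	req = embedded_payload(&wrb);
 *	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req),
 *			       &wrb, NULL);
 *	... fill req fields ...
 *	status = be_cmd_notify_wait(adapter, &wrb);
 *
 * (illustrative sketch only; the real callers follow below)
 */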

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	mutex_unlock(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

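/* Encode a ring length as fls(len), i.e. log2(len) + 1: a 1024-entry ring
 * encodes as 11. An encoding of 16 (a 32K ring) is wrapped to 0,
 * presumably the FW's representation of the maximum ring size.
 */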
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Sliport Event and Group 5 Events
	 * (bits 1, 5 and 17 set)
	 */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && be_virtfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ if available else MBOX */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	req = embedded_payload(&wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_cmd_notify_wait(adapter, &wrb);
	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* BE2 supports only v0 of this cmd; BE3 and Lancer use v1,
	 * newer chips (SH onwards) use v2
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	adapter->stats_cmd_sent = true;

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	mutex_lock(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	adapter->stats_cmd_sent = true;

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	mutex_lock(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* v1 of the cmd is supported on all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	mutex_unlock(&adapter->mcc_lock);
	return status;
}

/* Uses asynchronous mcc; the result is picked up later by
 * be_async_cmd_process() from the command's completion
 */
be_cmd_get_die_temperature(struct be_adapter * adapter)1743 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1744 {
1745 struct be_mcc_wrb *wrb;
1746 struct be_cmd_req_get_cntl_addnl_attribs *req;
1747 int status = 0;
1748
1749 mutex_lock(&adapter->mcc_lock);
1750
1751 wrb = wrb_from_mccq(adapter);
1752 if (!wrb) {
1753 status = -EBUSY;
1754 goto err;
1755 }
1756 req = embedded_payload(wrb);
1757
1758 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1759 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1760 sizeof(*req), wrb, NULL);
1761
1762 status = be_mcc_notify(adapter);
1763 err:
1764 mutex_unlock(&adapter->mcc_lock);
1765 return status;
1766 }
1767
1768 /* Uses synchronous mcc */
be_cmd_get_fat_dump_len(struct be_adapter * adapter,u32 * dump_size)1769 int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
1770 {
1771 struct be_mcc_wrb wrb = {0};
1772 struct be_cmd_req_get_fat *req;
1773 int status;
1774
1775 req = embedded_payload(&wrb);
1776
1777 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1778 OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
1779 &wrb, NULL);
1780 req->fat_operation = cpu_to_le32(QUERY_FAT);
1781 status = be_cmd_notify_wait(adapter, &wrb);
1782 if (!status) {
1783 struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
1784
1785 if (dump_size && resp->log_size)
1786 *dump_size = le32_to_cpu(resp->log_size) -
1787 sizeof(u32);
1788 }
1789 return status;
1790 }
1791
be_cmd_get_fat_dump(struct be_adapter * adapter,u32 buf_len,void * buf)1792 int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
1793 {
1794 struct be_dma_mem get_fat_cmd;
1795 struct be_mcc_wrb *wrb;
1796 struct be_cmd_req_get_fat *req;
1797 u32 offset = 0, total_size, buf_size,
1798 log_offset = sizeof(u32), payload_len;
1799 int status;
1800
1801 if (buf_len == 0)
1802 return 0;
1803
1804 total_size = buf_len;
1805
1806 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1807 get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1808 get_fat_cmd.size,
1809 &get_fat_cmd.dma, GFP_ATOMIC);
1810 if (!get_fat_cmd.va)
1811 return -ENOMEM;
1812
1813 mutex_lock(&adapter->mcc_lock);
1814
1815 while (total_size) {
1816 buf_size = min(total_size, (u32)60*1024);
1817 total_size -= buf_size;
1818
1819 wrb = wrb_from_mccq(adapter);
1820 if (!wrb) {
1821 status = -EBUSY;
1822 goto err;
1823 }
1824 req = get_fat_cmd.va;
1825
1826 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1827 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1828 OPCODE_COMMON_MANAGE_FAT, payload_len,
1829 wrb, &get_fat_cmd);
1830
1831 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1832 req->read_log_offset = cpu_to_le32(log_offset);
1833 req->read_log_length = cpu_to_le32(buf_size);
1834 req->data_buffer_size = cpu_to_le32(buf_size);
1835
1836 status = be_mcc_notify_wait(adapter);
1837 if (!status) {
1838 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1839
1840 memcpy(buf + offset,
1841 resp->data_buffer,
1842 le32_to_cpu(resp->read_log_length));
1843 } else {
1844 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1845 goto err;
1846 }
1847 offset += buf_size;
1848 log_offset += buf_size;
1849 }
1850 err:
1851 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1852 get_fat_cmd.va, get_fat_cmd.dma);
1853 mutex_unlock(&adapter->mcc_lock);
1854 return status;
1855 }
1856
1857 /* Uses synchronous mcc */
be_cmd_get_fw_ver(struct be_adapter * adapter)1858 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1859 {
1860 struct be_mcc_wrb *wrb;
1861 struct be_cmd_req_get_fw_version *req;
1862 int status;
1863
1864 mutex_lock(&adapter->mcc_lock);
1865
1866 wrb = wrb_from_mccq(adapter);
1867 if (!wrb) {
1868 status = -EBUSY;
1869 goto err;
1870 }
1871
1872 req = embedded_payload(wrb);
1873
1874 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1875 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1876 NULL);
1877 status = be_mcc_notify_wait(adapter);
1878 if (!status) {
1879 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1880
1881 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1882 sizeof(adapter->fw_ver));
1883 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1884 sizeof(adapter->fw_on_flash));
1885 }
1886 err:
1887 mutex_unlock(&adapter->mcc_lock);
1888 return status;
1889 }
1890
1891 /* Set the EQ delay interval of an EQ to the specified value.
1892 * Uses async mcc
1893 */
1894 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1895 struct be_set_eqd *set_eqd, int num)
1896 {
1897 struct be_mcc_wrb *wrb;
1898 struct be_cmd_req_modify_eq_delay *req;
1899 int status = 0, i;
1900
1901 mutex_lock(&adapter->mcc_lock);
1902
1903 wrb = wrb_from_mccq(adapter);
1904 if (!wrb) {
1905 status = -EBUSY;
1906 goto err;
1907 }
1908 req = embedded_payload(wrb);
1909
1910 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1911 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1912 NULL);
1913
1914 req->num_eq = cpu_to_le32(num);
1915 for (i = 0; i < num; i++) {
1916 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1917 req->set_eqd[i].phase = 0;
1918 req->set_eqd[i].delay_multiplier =
1919 cpu_to_le32(set_eqd[i].delay_multiplier);
1920 }
1921
1922 status = be_mcc_notify(adapter);
1923 err:
1924 mutex_unlock(&adapter->mcc_lock);
1925 return status;
1926 }
1927
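/* Batches EQ-delay updates: the MODIFY_EQ_DELAY request appears to carry
 * at most 8 EQ entries, so larger sets are issued in groups of 8. Note
 * that per-batch errors are not propagated to the caller.
 */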
1928 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1929 int num)
1930 {
1931 int num_eqs, i = 0;
1932
1933 while (num) {
1934 num_eqs = min(num, 8);
1935 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1936 i += num_eqs;
1937 num -= num_eqs;
1938 }
1939
1940 return 0;
1941 }
1942
1943 /* Uses synchronous mcc */
1944 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1945 u32 num, u32 domain)
1946 {
1947 struct be_mcc_wrb *wrb;
1948 struct be_cmd_req_vlan_config *req;
1949 int status;
1950
1951 mutex_lock(&adapter->mcc_lock);
1952
1953 wrb = wrb_from_mccq(adapter);
1954 if (!wrb) {
1955 status = -EBUSY;
1956 goto err;
1957 }
1958 req = embedded_payload(wrb);
1959
1960 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1961 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1962 wrb, NULL);
1963 req->hdr.domain = domain;
1964
1965 req->interface_id = if_id;
1966 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1967 req->num_vlan = num;
1968 memcpy(req->normal_vlan, vtag_array,
1969 req->num_vlan * sizeof(vtag_array[0]));
1970
1971 status = be_mcc_notify_wait(adapter);
1972 err:
1973 mutex_unlock(&adapter->mcc_lock);
1974 return status;
1975 }
1976
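/* Programs the RX filter through the pre-allocated non-embedded DMA
 * buffer in adapter->rx_filter. For multicast updates, the current
 * mc_list is copied into the request and a stale mcast-promisc setting
 * is cleared by setting the mask bit while leaving the flags bit clear.
 */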
1977 static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1978 {
1979 struct be_mcc_wrb *wrb;
1980 struct be_dma_mem *mem = &adapter->rx_filter;
1981 struct be_cmd_req_rx_filter *req = mem->va;
1982 int status;
1983
1984 mutex_lock(&adapter->mcc_lock);
1985
1986 wrb = wrb_from_mccq(adapter);
1987 if (!wrb) {
1988 status = -EBUSY;
1989 goto err;
1990 }
1991 memset(req, 0, sizeof(*req));
1992 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1993 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1994 wrb, mem);
1995
1996 req->if_id = cpu_to_le32(adapter->if_handle);
1997 req->if_flags_mask = cpu_to_le32(flags);
1998 req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1999
2000 if (flags & BE_IF_FLAGS_MULTICAST) {
2001 int i;
2002
2003 /* Reset mcast promisc mode if it was already set: set the
2004 * mask bit but leave the corresponding flags bit clear
2005 */
2006 req->if_flags_mask |=
2007 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
2008 be_if_cap_flags(adapter));
2009 req->mcast_num = cpu_to_le32(adapter->mc_count);
2010 for (i = 0; i < adapter->mc_count; i++)
2011 ether_addr_copy(req->mcast_mac[i].byte,
2012 adapter->mc_list[i].mac);
2013 }
2014
2015 status = be_mcc_notify_wait(adapter);
2016 err:
2017 mutex_unlock(&adapter->mcc_lock);
2018 return status;
2019 }
2020
2021 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
2022 {
2023 struct device *dev = &adapter->pdev->dev;
2024
2025 if ((flags & be_if_cap_flags(adapter)) != flags) {
2026 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
2027 dev_warn(dev, "Interface is capable of 0x%x flags only\n",
2028 be_if_cap_flags(adapter));
2029 }
2030 flags &= be_if_cap_flags(adapter);
2031 if (!flags)
2032 return -ENOTSUPP;
2033
2034 return __be_cmd_rx_filter(adapter, flags, value);
2035 }
2036
2037 /* Uses synchronous mcc */
2038 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
2039 {
2040 struct be_mcc_wrb *wrb;
2041 struct be_cmd_req_set_flow_control *req;
2042 int status;
2043
2044 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
2045 CMD_SUBSYSTEM_COMMON))
2046 return -EPERM;
2047
2048 mutex_lock(&adapter->mcc_lock);
2049
2050 wrb = wrb_from_mccq(adapter);
2051 if (!wrb) {
2052 status = -EBUSY;
2053 goto err;
2054 }
2055 req = embedded_payload(wrb);
2056
2057 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2058 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
2059 wrb, NULL);
2060
2061 req->hdr.version = 1;
2062 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2063 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2064
2065 status = be_mcc_notify_wait(adapter);
2066
2067 err:
2068 mutex_unlock(&adapter->mcc_lock);
2069
2070 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2071 return -EOPNOTSUPP;
2072
2073 return status;
2074 }
2075
2076 /* Uses sync mcc */
2077 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2078 {
2079 struct be_mcc_wrb *wrb;
2080 struct be_cmd_req_get_flow_control *req;
2081 int status;
2082
2083 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2084 CMD_SUBSYSTEM_COMMON))
2085 return -EPERM;
2086
2087 mutex_lock(&adapter->mcc_lock);
2088
2089 wrb = wrb_from_mccq(adapter);
2090 if (!wrb) {
2091 status = -EBUSY;
2092 goto err;
2093 }
2094 req = embedded_payload(wrb);
2095
2096 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2097 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2098 wrb, NULL);
2099
2100 status = be_mcc_notify_wait(adapter);
2101 if (!status) {
2102 struct be_cmd_resp_get_flow_control *resp =
2103 embedded_payload(wrb);
2104
2105 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2106 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2107 }
2108
2109 err:
2110 mutex_unlock(&adapter->mcc_lock);
2111 return status;
2112 }
2113
2114 /* Uses mbox */
2115 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2116 {
2117 struct be_mcc_wrb *wrb;
2118 struct be_cmd_req_query_fw_cfg *req;
2119 int status;
2120
2121 if (mutex_lock_interruptible(&adapter->mbox_lock))
2122 return -1;
2123
2124 wrb = wrb_from_mbox(adapter);
2125 req = embedded_payload(wrb);
2126
2127 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2128 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2129 sizeof(*req), wrb, NULL);
2130
2131 status = be_mbox_notify_wait(adapter);
2132 if (!status) {
2133 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2134
2135 adapter->port_num = le32_to_cpu(resp->phys_port);
2136 adapter->function_mode = le32_to_cpu(resp->function_mode);
2137 adapter->function_caps = le32_to_cpu(resp->function_caps);
2138 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2139 dev_info(&adapter->pdev->dev,
2140 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2141 adapter->function_mode, adapter->function_caps);
2142 }
2143
2144 mutex_unlock(&adapter->mbox_lock);
2145 return status;
2146 }
2147
2148 /* Uses mbox */
2149 int be_cmd_reset_function(struct be_adapter *adapter)
2150 {
2151 struct be_mcc_wrb *wrb;
2152 struct be_cmd_req_hdr *req;
2153 int status;
2154
2155 if (lancer_chip(adapter)) {
2156 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2157 adapter->db + SLIPORT_CONTROL_OFFSET);
2158 status = lancer_wait_ready(adapter);
2159 if (status)
2160 dev_err(&adapter->pdev->dev,
2161 "Adapter in non recoverable error\n");
2162 return status;
2163 }
2164
2165 if (mutex_lock_interruptible(&adapter->mbox_lock))
2166 return -1;
2167
2168 wrb = wrb_from_mbox(adapter);
2169 req = embedded_payload(wrb);
2170
2171 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2172 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2173 NULL);
2174
2175 status = be_mbox_notify_wait(adapter);
2176
2177 mutex_unlock(&adapter->mbox_lock);
2178 return status;
2179 }
2180
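/* Uses sync mcc. table_size is encoded as its log2 via fls(), so the
 * RSS indirection table size is expected to be a power of two.
 */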
2181 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2182 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2183 {
2184 struct be_mcc_wrb *wrb;
2185 struct be_cmd_req_rss_config *req;
2186 int status;
2187
2188 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2189 return 0;
2190
2191 mutex_lock(&adapter->mcc_lock);
2192
2193 wrb = wrb_from_mccq(adapter);
2194 if (!wrb) {
2195 status = -EBUSY;
2196 goto err;
2197 }
2198 req = embedded_payload(wrb);
2199
2200 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2201 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2202
2203 req->if_id = cpu_to_le32(adapter->if_handle);
2204 req->enable_rss = cpu_to_le16(rss_hash_opts);
2205 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2206
2207 if (!BEx_chip(adapter))
2208 req->hdr.version = 1;
2209
2210 memcpy(req->cpu_table, rsstable, table_size);
2211 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2212 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2213
2214 status = be_mcc_notify_wait(adapter);
2215 err:
2216 mutex_unlock(&adapter->mcc_lock);
2217 return status;
2218 }
2219
2220 /* Uses sync mcc */
2221 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2222 u8 bcn, u8 sts, u8 state)
2223 {
2224 struct be_mcc_wrb *wrb;
2225 struct be_cmd_req_enable_disable_beacon *req;
2226 int status;
2227
2228 mutex_lock(&adapter->mcc_lock);
2229
2230 wrb = wrb_from_mccq(adapter);
2231 if (!wrb) {
2232 status = -EBUSY;
2233 goto err;
2234 }
2235 req = embedded_payload(wrb);
2236
2237 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2238 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2239 sizeof(*req), wrb, NULL);
2240
2241 req->port_num = port_num;
2242 req->beacon_state = state;
2243 req->beacon_duration = bcn;
2244 req->status_duration = sts;
2245
2246 status = be_mcc_notify_wait(adapter);
2247
2248 err:
2249 mutex_unlock(&adapter->mcc_lock);
2250 return status;
2251 }
2252
2253 /* Uses sync mcc */
2254 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2255 {
2256 struct be_mcc_wrb *wrb;
2257 struct be_cmd_req_get_beacon_state *req;
2258 int status;
2259
2260 mutex_lock(&adapter->mcc_lock);
2261
2262 wrb = wrb_from_mccq(adapter);
2263 if (!wrb) {
2264 status = -EBUSY;
2265 goto err;
2266 }
2267 req = embedded_payload(wrb);
2268
2269 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2270 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2271 wrb, NULL);
2272
2273 req->port_num = port_num;
2274
2275 status = be_mcc_notify_wait(adapter);
2276 if (!status) {
2277 struct be_cmd_resp_get_beacon_state *resp =
2278 embedded_payload(wrb);
2279
2280 *state = resp->beacon_state;
2281 }
2282
2283 err:
2284 mutex_unlock(&adapter->mcc_lock);
2285 return status;
2286 }
2287
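/* Reads 'len' bytes at offset 'off' of transceiver EEPROM page
 * 'page_num'; TR_PAGE_A0/TR_PAGE_A2 presumably correspond to the
 * standard SFF-8472 A0h/A2h pages.
 */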
2288 /* Uses sync mcc */
2289 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2290 u8 page_num, u32 off, u32 len, u8 *data)
2291 {
2292 struct be_dma_mem cmd;
2293 struct be_mcc_wrb *wrb;
2294 struct be_cmd_req_port_type *req;
2295 int status;
2296
2297 if (page_num > TR_PAGE_A2)
2298 return -EINVAL;
2299
2300 cmd.size = sizeof(struct be_cmd_resp_port_type);
2301 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2302 GFP_ATOMIC);
2303 if (!cmd.va) {
2304 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2305 return -ENOMEM;
2306 }
2307
2308 mutex_lock(&adapter->mcc_lock);
2309
2310 wrb = wrb_from_mccq(adapter);
2311 if (!wrb) {
2312 status = -EBUSY;
2313 goto err;
2314 }
2315 req = cmd.va;
2316
2317 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2318 OPCODE_COMMON_READ_TRANSRECV_DATA,
2319 cmd.size, wrb, &cmd);
2320
2321 req->port = cpu_to_le32(adapter->hba_port_num);
2322 req->page_num = cpu_to_le32(page_num);
2323 status = be_mcc_notify_wait(adapter);
2324 if (!status && len > 0) {
2325 struct be_cmd_resp_port_type *resp = cmd.va;
2326
2327 memcpy(data, resp->page_data + off, len);
2328 }
2329 err:
2330 mutex_unlock(&adapter->mcc_lock);
2331 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2332 return status;
2333 }
2334
2335 static int lancer_cmd_write_object(struct be_adapter *adapter,
2336 struct be_dma_mem *cmd, u32 data_size,
2337 u32 data_offset, const char *obj_name,
2338 u32 *data_written, u8 *change_status,
2339 u8 *addn_status)
2340 {
2341 struct be_mcc_wrb *wrb;
2342 struct lancer_cmd_req_write_object *req;
2343 struct lancer_cmd_resp_write_object *resp;
2344 void *ctxt = NULL;
2345 int status;
2346
2347 mutex_lock(&adapter->mcc_lock);
2348 adapter->flash_status = 0;
2349
2350 wrb = wrb_from_mccq(adapter);
2351 if (!wrb) {
2352 status = -EBUSY;
2353 goto err_unlock;
2354 }
2355
2356 req = embedded_payload(wrb);
2357
2358 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2359 OPCODE_COMMON_WRITE_OBJECT,
2360 sizeof(struct lancer_cmd_req_write_object), wrb,
2361 NULL);
2362
2363 ctxt = &req->context;
2364 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2365 write_length, ctxt, data_size);
2366
2367 if (data_size == 0)
2368 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2369 eof, ctxt, 1);
2370 else
2371 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2372 eof, ctxt, 0);
2373
2374 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2375 req->write_offset = cpu_to_le32(data_offset);
2376 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2377 req->descriptor_count = cpu_to_le32(1);
2378 req->buf_len = cpu_to_le32(data_size);
2379 req->addr_low = cpu_to_le32((cmd->dma +
2380 sizeof(struct lancer_cmd_req_write_object))
2381 & 0xFFFFFFFF);
2382 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2383 sizeof(struct lancer_cmd_req_write_object)));
2384
2385 status = be_mcc_notify(adapter);
2386 if (status)
2387 goto err_unlock;
2388
2389 mutex_unlock(&adapter->mcc_lock);
2390
2391 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2392 msecs_to_jiffies(60000)))
2393 status = -ETIMEDOUT;
2394 else
2395 status = adapter->flash_status;
2396
2397 resp = embedded_payload(wrb);
2398 if (!status) {
2399 *data_written = le32_to_cpu(resp->actual_write_len);
2400 *change_status = resp->change_status;
2401 } else {
2402 *addn_status = resp->additional_status;
2403 }
2404
2405 return status;
2406
2407 err_unlock:
2408 mutex_unlock(&adapter->mcc_lock);
2409 return status;
2410 }
2411
2412 int be_cmd_query_cable_type(struct be_adapter *adapter)
2413 {
2414 u8 page_data[PAGE_DATA_LEN];
2415 int status;
2416
2417 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2418 0, PAGE_DATA_LEN, page_data);
2419 if (!status) {
2420 switch (adapter->phy.interface_type) {
2421 case PHY_TYPE_QSFP:
2422 adapter->phy.cable_type =
2423 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2424 break;
2425 case PHY_TYPE_SFP_PLUS_10GB:
2426 adapter->phy.cable_type =
2427 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2428 break;
2429 default:
2430 adapter->phy.cable_type = 0;
2431 break;
2432 }
2433 }
2434 return status;
2435 }
2436
2437 int be_cmd_query_sfp_info(struct be_adapter *adapter)
2438 {
2439 u8 page_data[PAGE_DATA_LEN];
2440 int status;
2441
2442 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2443 0, PAGE_DATA_LEN, page_data);
2444 if (!status) {
2445 strlcpy(adapter->phy.vendor_name, page_data +
2446 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2447 strlcpy(adapter->phy.vendor_pn,
2448 page_data + SFP_VENDOR_PN_OFFSET,
2449 SFP_VENDOR_NAME_LEN - 1);
2450 }
2451
2452 return status;
2453 }
2454
2455 static int lancer_cmd_delete_object(struct be_adapter *adapter,
2456 const char *obj_name)
2457 {
2458 struct lancer_cmd_req_delete_object *req;
2459 struct be_mcc_wrb *wrb;
2460 int status;
2461
2462 mutex_lock(&adapter->mcc_lock);
2463
2464 wrb = wrb_from_mccq(adapter);
2465 if (!wrb) {
2466 status = -EBUSY;
2467 goto err;
2468 }
2469
2470 req = embedded_payload(wrb);
2471
2472 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2473 OPCODE_COMMON_DELETE_OBJECT,
2474 sizeof(*req), wrb, NULL);
2475
2476 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2477
2478 status = be_mcc_notify_wait(adapter);
2479 err:
2480 mutex_unlock(&adapter->mcc_lock);
2481 return status;
2482 }
2483
2484 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2485 u32 data_size, u32 data_offset, const char *obj_name,
2486 u32 *data_read, u32 *eof, u8 *addn_status)
2487 {
2488 struct be_mcc_wrb *wrb;
2489 struct lancer_cmd_req_read_object *req;
2490 struct lancer_cmd_resp_read_object *resp;
2491 int status;
2492
2493 mutex_lock(&adapter->mcc_lock);
2494
2495 wrb = wrb_from_mccq(adapter);
2496 if (!wrb) {
2497 status = -EBUSY;
2498 goto err_unlock;
2499 }
2500
2501 req = embedded_payload(wrb);
2502
2503 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2504 OPCODE_COMMON_READ_OBJECT,
2505 sizeof(struct lancer_cmd_req_read_object), wrb,
2506 NULL);
2507
2508 req->desired_read_len = cpu_to_le32(data_size);
2509 req->read_offset = cpu_to_le32(data_offset);
2510 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2511 req->descriptor_count = cpu_to_le32(1);
2512 req->buf_len = cpu_to_le32(data_size);
2513 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2514 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2515
2516 status = be_mcc_notify_wait(adapter);
2517
2518 resp = embedded_payload(wrb);
2519 if (!status) {
2520 *data_read = le32_to_cpu(resp->actual_read_len);
2521 *eof = le32_to_cpu(resp->eof);
2522 } else {
2523 *addn_status = resp->additional_status;
2524 }
2525
2526 err_unlock:
2527 mutex_unlock(&adapter->mcc_lock);
2528 return status;
2529 }
2530
2531 static int be_cmd_write_flashrom(struct be_adapter *adapter,
2532 struct be_dma_mem *cmd, u32 flash_type,
2533 u32 flash_opcode, u32 img_offset, u32 buf_size)
2534 {
2535 struct be_mcc_wrb *wrb;
2536 struct be_cmd_write_flashrom *req;
2537 int status;
2538
2539 mutex_lock(&adapter->mcc_lock);
2540 adapter->flash_status = 0;
2541
2542 wrb = wrb_from_mccq(adapter);
2543 if (!wrb) {
2544 status = -EBUSY;
2545 goto err_unlock;
2546 }
2547 req = cmd->va;
2548
2549 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2550 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2551 cmd);
2552
2553 req->params.op_type = cpu_to_le32(flash_type);
2554 if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2555 req->params.offset = cpu_to_le32(img_offset);
2556
2557 req->params.op_code = cpu_to_le32(flash_opcode);
2558 req->params.data_buf_size = cpu_to_le32(buf_size);
2559
2560 status = be_mcc_notify(adapter);
2561 if (status)
2562 goto err_unlock;
2563
2564 mutex_unlock(&adapter->mcc_lock);
2565
2566 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2567 msecs_to_jiffies(40000)))
2568 status = -ETIMEDOUT;
2569 else
2570 status = adapter->flash_status;
2571
2572 return status;
2573
2574 err_unlock:
2575 mutex_unlock(&adapter->mcc_lock);
2576 return status;
2577 }
2578
2579 static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2580 u16 img_optype, u32 img_offset, u32 crc_offset)
2581 {
2582 struct be_cmd_read_flash_crc *req;
2583 struct be_mcc_wrb *wrb;
2584 int status;
2585
2586 mutex_lock(&adapter->mcc_lock);
2587
2588 wrb = wrb_from_mccq(adapter);
2589 if (!wrb) {
2590 status = -EBUSY;
2591 goto err;
2592 }
2593 req = embedded_payload(wrb);
2594
2595 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2596 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2597 wrb, NULL);
2598
2599 req->params.op_type = cpu_to_le32(img_optype);
2600 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2601 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2602 else
2603 req->params.offset = cpu_to_le32(crc_offset);
2604
2605 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2606 req->params.data_buf_size = cpu_to_le32(0x4);
2607
2608 status = be_mcc_notify_wait(adapter);
2609 if (!status)
2610 memcpy(flashed_crc, req->crc, 4);
2611
2612 err:
2613 mutex_unlock(&adapter->mcc_lock);
2614 return status;
2615 }
2616
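/* Marker ("*** SE FLASH DIRECTORY *** ") that identifies a flash section
 * header in the UFI image, split into two halves presumably so that a
 * cookie scan never matches the string inside the driver object itself.
 */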
2617 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2618
2619 static bool phy_flashing_required(struct be_adapter *adapter)
2620 {
2621 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
2622 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2623 }
2624
2625 static bool is_comp_in_ufi(struct be_adapter *adapter,
2626 struct flash_section_info *fsec, int type)
2627 {
2628 int i = 0, img_type = 0;
2629 struct flash_section_info_g2 *fsec_g2 = NULL;
2630
2631 if (BE2_chip(adapter))
2632 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2633
2634 for (i = 0; i < MAX_FLASH_COMP; i++) {
2635 if (fsec_g2)
2636 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2637 else
2638 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2639
2640 if (img_type == type)
2641 return true;
2642 }
2643 return false;
2644 }
2645
2646 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2647 int header_size,
2648 const struct firmware *fw)
2649 {
2650 struct flash_section_info *fsec = NULL;
2651 const u8 *p = fw->data;
2652
2653 p += header_size;
2654 while (p < (fw->data + fw->size)) {
2655 fsec = (struct flash_section_info *)p;
2656 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2657 return fsec;
2658 p += 32;
2659 }
2660 return NULL;
2661 }
2662
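/* Compares the 4-byte CRC stored at the end of the on-flash region with
 * the CRC at the same position in the new image; a match lets the caller
 * skip re-flashing an identical section.
 */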
2663 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
2664 u32 img_offset, u32 img_size, int hdr_size,
2665 u16 img_optype, bool *crc_match)
2666 {
2667 u32 crc_offset;
2668 int status;
2669 u8 crc[4];
2670
2671 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
2672 img_size - 4);
2673 if (status)
2674 return status;
2675
2676 crc_offset = hdr_size + img_offset + img_size - 4;
2677
2678 /* Skip flashing, if crc of flashed region matches */
2679 if (!memcmp(crc, p + crc_offset, 4))
2680 *crc_match = true;
2681 else
2682 *crc_match = false;
2683
2684 return status;
2685 }
2686
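/* Streams one component to flash in chunks of up to 32KB. Intermediate
 * chunks use the SAVE ops; the final chunk uses the FLASH ops, which
 * presumably commits the accumulated data to flash.
 */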
2687 static int be_flash(struct be_adapter *adapter, const u8 *img,
2688 struct be_dma_mem *flash_cmd, int optype, int img_size,
2689 u32 img_offset)
2690 {
2691 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
2692 struct be_cmd_write_flashrom *req = flash_cmd->va;
2693 int status;
2694
2695 while (total_bytes) {
2696 num_bytes = min_t(u32, 32 * 1024, total_bytes);
2697
2698 total_bytes -= num_bytes;
2699
2700 if (!total_bytes) {
2701 if (optype == OPTYPE_PHY_FW)
2702 flash_op = FLASHROM_OPER_PHY_FLASH;
2703 else
2704 flash_op = FLASHROM_OPER_FLASH;
2705 } else {
2706 if (optype == OPTYPE_PHY_FW)
2707 flash_op = FLASHROM_OPER_PHY_SAVE;
2708 else
2709 flash_op = FLASHROM_OPER_SAVE;
2710 }
2711
2712 memcpy(req->data_buf, img, num_bytes);
2713 img += num_bytes;
2714 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
2715 flash_op, img_offset +
2716 bytes_sent, num_bytes);
2717 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
2718 optype == OPTYPE_PHY_FW)
2719 break;
2720 else if (status)
2721 return status;
2722
2723 bytes_sent += num_bytes;
2724 }
2725 return 0;
2726 }
2727
2728 #define NCSI_UPDATE_LOG "NCSI section update is not supported in FW ver %s\n"
2729 static bool be_fw_ncsi_supported(char *ver)
2730 {
2731 int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
2732 int v2[4];
2733 int i;
2734
2735 if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
2736 return false;
2737
2738 for (i = 0; i < 4; i++) {
2739 if (v1[i] < v2[i])
2740 return true;
2741 else if (v1[i] > v2[i])
2742 return false;
2743 }
2744
2745 return true;
2746 }
2747
2748 /* For BE2, BE3 and BE3-R */
2749 static int be_flash_BEx(struct be_adapter *adapter,
2750 const struct firmware *fw,
2751 struct be_dma_mem *flash_cmd, int num_of_images)
2752 {
2753 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2754 struct device *dev = &adapter->pdev->dev;
2755 struct flash_section_info *fsec = NULL;
2756 int status, i, filehdr_size, num_comp;
2757 const struct flash_comp *pflashcomp;
2758 bool crc_match;
2759 const u8 *p;
2760
2761 static const struct flash_comp gen3_flash_types[] = {
2762 { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2763 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2764 { BE3_REDBOOT_START, OPTYPE_REDBOOT,
2765 BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2766 { BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
2767 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2768 { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2769 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2770 { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2771 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2772 { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2773 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2774 { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2775 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2776 { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2777 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
2778 { BE3_NCSI_START, OPTYPE_NCSI_FW,
2779 BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
2780 { BE3_PHY_FW_START, OPTYPE_PHY_FW,
2781 BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
2782 };
2783
2784 static const struct flash_comp gen2_flash_types[] = {
2785 { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2786 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2787 { BE2_REDBOOT_START, OPTYPE_REDBOOT,
2788 BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2789 { BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
2790 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2791 { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2792 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2793 { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2794 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2795 { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2796 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2797 { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2798 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2799 { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2800 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
2801 };
2802
2803 if (BE3_chip(adapter)) {
2804 pflashcomp = gen3_flash_types;
2805 filehdr_size = sizeof(struct flash_file_hdr_g3);
2806 num_comp = ARRAY_SIZE(gen3_flash_types);
2807 } else {
2808 pflashcomp = gen2_flash_types;
2809 filehdr_size = sizeof(struct flash_file_hdr_g2);
2810 num_comp = ARRAY_SIZE(gen2_flash_types);
2811 img_hdrs_size = 0;
2812 }
2813
2814 /* Get flash section info */
2815 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2816 if (!fsec) {
2817 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2818 return -1;
2819 }
2820 for (i = 0; i < num_comp; i++) {
2821 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2822 continue;
2823
2824 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2825 !be_fw_ncsi_supported(adapter->fw_ver)) {
2826 dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
2827 continue;
2828 }
2829
2830 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
2831 !phy_flashing_required(adapter))
2832 continue;
2833
2834 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
2835 status = be_check_flash_crc(adapter, fw->data,
2836 pflashcomp[i].offset,
2837 pflashcomp[i].size,
2838 filehdr_size +
2839 img_hdrs_size,
2840 OPTYPE_REDBOOT, &crc_match);
2841 if (status) {
2842 dev_err(dev,
2843 "Could not get CRC for 0x%x region\n",
2844 pflashcomp[i].optype);
2845 continue;
2846 }
2847
2848 if (crc_match)
2849 continue;
2850 }
2851
2852 p = fw->data + filehdr_size + pflashcomp[i].offset +
2853 img_hdrs_size;
2854 if (p + pflashcomp[i].size > fw->data + fw->size)
2855 return -1;
2856
2857 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
2858 pflashcomp[i].size, 0);
2859 if (status) {
2860 dev_err(dev, "Flashing section type 0x%x failed\n",
2861 pflashcomp[i].img_type);
2862 return status;
2863 }
2864 }
2865 return 0;
2866 }
2867
2868 static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
2869 {
2870 u32 img_type = le32_to_cpu(fsec_entry.type);
2871 u16 img_optype = le16_to_cpu(fsec_entry.optype);
2872
2873 if (img_optype != 0xFFFF)
2874 return img_optype;
2875
2876 switch (img_type) {
2877 case IMAGE_FIRMWARE_ISCSI:
2878 img_optype = OPTYPE_ISCSI_ACTIVE;
2879 break;
2880 case IMAGE_BOOT_CODE:
2881 img_optype = OPTYPE_REDBOOT;
2882 break;
2883 case IMAGE_OPTION_ROM_ISCSI:
2884 img_optype = OPTYPE_BIOS;
2885 break;
2886 case IMAGE_OPTION_ROM_PXE:
2887 img_optype = OPTYPE_PXE_BIOS;
2888 break;
2889 case IMAGE_OPTION_ROM_FCOE:
2890 img_optype = OPTYPE_FCOE_BIOS;
2891 break;
2892 case IMAGE_FIRMWARE_BACKUP_ISCSI:
2893 img_optype = OPTYPE_ISCSI_BACKUP;
2894 break;
2895 case IMAGE_NCSI:
2896 img_optype = OPTYPE_NCSI_FW;
2897 break;
2898 case IMAGE_FLASHISM_JUMPVECTOR:
2899 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
2900 break;
2901 case IMAGE_FIRMWARE_PHY:
2902 img_optype = OPTYPE_SH_PHY_FW;
2903 break;
2904 case IMAGE_REDBOOT_DIR:
2905 img_optype = OPTYPE_REDBOOT_DIR;
2906 break;
2907 case IMAGE_REDBOOT_CONFIG:
2908 img_optype = OPTYPE_REDBOOT_CONFIG;
2909 break;
2910 case IMAGE_UFI_DIR:
2911 img_optype = OPTYPE_UFI_DIR;
2912 break;
2913 default:
2914 break;
2915 }
2916
2917 return img_optype;
2918 }
2919
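/* Flashes a Skyhawk UFI image. Offset-based flashing
 * (OPTYPE_OFFSET_SPECIFIED) is tried first; if the FW on the card
 * rejects it with ILLEGAL_REQUEST/ILLEGAL_FIELD, the loop restarts with
 * the older per-component optype mechanism.
 */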
2920 static int be_flash_skyhawk(struct be_adapter *adapter,
2921 const struct firmware *fw,
2922 struct be_dma_mem *flash_cmd, int num_of_images)
2923 {
2924 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
2925 bool crc_match, old_fw_img, flash_offset_support = true;
2926 struct device *dev = &adapter->pdev->dev;
2927 struct flash_section_info *fsec = NULL;
2928 u32 img_offset, img_size, img_type;
2929 u16 img_optype, flash_optype;
2930 int status, i, filehdr_size;
2931 const u8 *p;
2932
2933 filehdr_size = sizeof(struct flash_file_hdr_g3);
2934 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2935 if (!fsec) {
2936 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2937 return -EINVAL;
2938 }
2939
2940 retry_flash:
2941 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
2942 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
2943 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
2944 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2945 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
2946 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
2947
2948 if (img_optype == 0xFFFF)
2949 continue;
2950
2951 if (flash_offset_support)
2952 flash_optype = OPTYPE_OFFSET_SPECIFIED;
2953 else
2954 flash_optype = img_optype;
2955
2956 /* Don't bother verifying CRC if an old FW image is being
2957 * flashed
2958 */
2959 if (old_fw_img)
2960 goto flash;
2961
2962 status = be_check_flash_crc(adapter, fw->data, img_offset,
2963 img_size, filehdr_size +
2964 img_hdrs_size, flash_optype,
2965 &crc_match);
2966 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
2967 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
2968 /* The current FW image on the card does not support
2969 * OFFSET based flashing. Retry using older mechanism
2970 * of OPTYPE based flashing
2971 */
2972 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2973 flash_offset_support = false;
2974 goto retry_flash;
2975 }
2976
2977 /* The current FW image on the card does not recognize
2978 * the new FLASH op_type. The FW download is partially
2979 * complete. Reboot the server now to enable FW image
2980 * to recognize the new FLASH op_type. To complete the
2981 * remaining process, download the same FW again after
2982 * the reboot.
2983 */
2984 dev_err(dev, "Flash incomplete. Reset the server\n");
2985 dev_err(dev, "Download FW image again after reset\n");
2986 return -EAGAIN;
2987 } else if (status) {
2988 dev_err(dev, "Could not get CRC for 0x%x region\n",
2989 img_optype);
2990 return -EFAULT;
2991 }
2992
2993 if (crc_match)
2994 continue;
2995
2996 flash:
2997 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
2998 if (p + img_size > fw->data + fw->size)
2999 return -1;
3000
3001 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
3002 img_offset);
3003
3004 /* The current FW image on the card does not support OFFSET
3005 * based flashing. Retry using older mechanism of OPTYPE based
3006 * flashing
3007 */
3008 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
3009 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
3010 flash_offset_support = false;
3011 goto retry_flash;
3012 }
3013
3014 /* For old FW images ignore ILLEGAL_FIELD error or errors on
3015 * UFI_DIR region
3016 */
3017 if (old_fw_img &&
3018 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
3019 (img_optype == OPTYPE_UFI_DIR &&
3020 base_status(status) == MCC_STATUS_FAILED))) {
3021 continue;
3022 } else if (status) {
3023 dev_err(dev, "Flashing section type 0x%x failed\n",
3024 img_type);
3025
3026 switch (addl_status(status)) {
3027 case MCC_ADDL_STATUS_MISSING_SIGNATURE:
3028 dev_err(dev,
3029 "Digital signature missing in FW\n");
3030 return -EINVAL;
3031 case MCC_ADDL_STATUS_INVALID_SIGNATURE:
3032 dev_err(dev,
3033 "Invalid digital signature in FW\n");
3034 return -EINVAL;
3035 default:
3036 return -EFAULT;
3037 }
3038 }
3039 }
3040 return 0;
3041 }
3042
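/* Downloads a Lancer FW image: the image is streamed in
 * LANCER_FW_DOWNLOAD_CHUNK pieces via WRITE_OBJECT and then committed
 * with a final zero-length write, which sets the eof bit in the request
 * context. The image size must be a multiple of 4 bytes.
 */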
3043 int lancer_fw_download(struct be_adapter *adapter,
3044 const struct firmware *fw)
3045 {
3046 struct device *dev = &adapter->pdev->dev;
3047 struct be_dma_mem flash_cmd;
3048 const u8 *data_ptr = NULL;
3049 u8 *dest_image_ptr = NULL;
3050 size_t image_size = 0;
3051 u32 chunk_size = 0;
3052 u32 data_written = 0;
3053 u32 offset = 0;
3054 int status = 0;
3055 u8 add_status = 0;
3056 u8 change_status;
3057
3058 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3059 dev_err(dev, "FW image size should be multiple of 4\n");
3060 return -EINVAL;
3061 }
3062
3063 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3064 + LANCER_FW_DOWNLOAD_CHUNK;
3065 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3066 GFP_KERNEL);
3067 if (!flash_cmd.va)
3068 return -ENOMEM;
3069
3070 dest_image_ptr = flash_cmd.va +
3071 sizeof(struct lancer_cmd_req_write_object);
3072 image_size = fw->size;
3073 data_ptr = fw->data;
3074
3075 while (image_size) {
3076 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3077
3078 /* Copy the image chunk content. */
3079 memcpy(dest_image_ptr, data_ptr, chunk_size);
3080
3081 status = lancer_cmd_write_object(adapter, &flash_cmd,
3082 chunk_size, offset,
3083 LANCER_FW_DOWNLOAD_LOCATION,
3084 &data_written, &change_status,
3085 &add_status);
3086 if (status)
3087 break;
3088
3089 offset += data_written;
3090 data_ptr += data_written;
3091 image_size -= data_written;
3092 }
3093
3094 if (!status) {
3095 /* Commit the FW written */
3096 status = lancer_cmd_write_object(adapter, &flash_cmd,
3097 0, offset,
3098 LANCER_FW_DOWNLOAD_LOCATION,
3099 &data_written, &change_status,
3100 &add_status);
3101 }
3102
3103 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3104 if (status) {
3105 dev_err(dev, "Firmware load error\n");
3106 return be_cmd_status(status);
3107 }
3108
3109 dev_info(dev, "Firmware flashed successfully\n");
3110
3111 if (change_status == LANCER_FW_RESET_NEEDED) {
3112 dev_info(dev, "Resetting adapter to activate new FW\n");
3113 status = lancer_physdev_ctrl(adapter,
3114 PHYSDEV_CONTROL_FW_RESET_MASK);
3115 if (status) {
3116 dev_err(dev, "Adapter busy, could not reset FW\n");
3117 dev_err(dev, "Reboot server to activate new FW\n");
3118 }
3119 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3120 dev_info(dev, "Reboot server to activate new FW\n");
3121 }
3122
3123 return 0;
3124 }
3125
3126 /* Check if the flash image file is compatible with the adapter that
3127 * is being flashed.
3128 */
3129 static bool be_check_ufi_compatibility(struct be_adapter *adapter,
3130 struct flash_file_hdr_g3 *fhdr)
3131 {
3132 if (!fhdr) {
3133 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
3134 return false;
3135 }
3136
3137 /* First letter of the build version is used to identify
3138 * which chip this image file is meant for.
3139 */
3140 switch (fhdr->build[0]) {
3141 case BLD_STR_UFI_TYPE_SH:
3142 if (!skyhawk_chip(adapter))
3143 return false;
3144 break;
3145 case BLD_STR_UFI_TYPE_BE3:
3146 if (!BE3_chip(adapter))
3147 return false;
3148 break;
3149 case BLD_STR_UFI_TYPE_BE2:
3150 if (!BE2_chip(adapter))
3151 return false;
3152 break;
3153 default:
3154 return false;
3155 }
3156
3157 /* In BE3 FW images the "asic_type_rev" field doesn't track the
3158 * asic_rev of the chips it is compatible with.
3159 * When asic_type_rev is 0 the image is compatible only with
3160 * pre-BE3-R chips (asic_rev < 0x10)
3161 */
3162 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
3163 return adapter->asic_rev < 0x10;
3164 else
3165 return (fhdr->asic_type_rev >= adapter->asic_rev);
3166 }
3167
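/* Downloads a BEx/Skyhawk UFI image: after the compatibility check, the
 * image headers are walked and each is flashed via the chip-specific
 * routine; on non-BE2 chips only sections with imageid 1 are flashed.
 */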
3168 int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3169 {
3170 struct device *dev = &adapter->pdev->dev;
3171 struct flash_file_hdr_g3 *fhdr3;
3172 struct image_hdr *img_hdr_ptr;
3173 int status = 0, i, num_imgs;
3174 struct be_dma_mem flash_cmd;
3175
3176 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3177 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
3178 dev_err(dev, "Flash image is not compatible with adapter\n");
3179 return -EINVAL;
3180 }
3181
3182 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3183 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3184 GFP_KERNEL);
3185 if (!flash_cmd.va)
3186 return -ENOMEM;
3187
3188 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3189 for (i = 0; i < num_imgs; i++) {
3190 img_hdr_ptr = (struct image_hdr *)(fw->data +
3191 (sizeof(struct flash_file_hdr_g3) +
3192 i * sizeof(struct image_hdr)));
3193 if (!BE2_chip(adapter) &&
3194 le32_to_cpu(img_hdr_ptr->imageid) != 1)
3195 continue;
3196
3197 if (skyhawk_chip(adapter))
3198 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
3199 num_imgs);
3200 else
3201 status = be_flash_BEx(adapter, fw, &flash_cmd,
3202 num_imgs);
3203 }
3204
3205 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3206 if (!status)
3207 dev_info(dev, "Firmware flashed successfully\n");
3208
3209 return status;
3210 }
3211
3212 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
3213 struct be_dma_mem *nonemb_cmd)
3214 {
3215 struct be_mcc_wrb *wrb;
3216 struct be_cmd_req_acpi_wol_magic_config *req;
3217 int status;
3218
3219 mutex_lock(&adapter->mcc_lock);
3220
3221 wrb = wrb_from_mccq(adapter);
3222 if (!wrb) {
3223 status = -EBUSY;
3224 goto err;
3225 }
3226 req = nonemb_cmd->va;
3227
3228 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3229 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
3230 wrb, nonemb_cmd);
3231 memcpy(req->magic_mac, mac, ETH_ALEN);
3232
3233 status = be_mcc_notify_wait(adapter);
3234
3235 err:
3236 mutex_unlock(&adapter->mcc_lock);
3237 return status;
3238 }
3239
3240 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
3241 u8 loopback_type, u8 enable)
3242 {
3243 struct be_mcc_wrb *wrb;
3244 struct be_cmd_req_set_lmode *req;
3245 int status;
3246
3247 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
3248 CMD_SUBSYSTEM_LOWLEVEL))
3249 return -EPERM;
3250
3251 mutex_lock(&adapter->mcc_lock);
3252
3253 wrb = wrb_from_mccq(adapter);
3254 if (!wrb) {
3255 status = -EBUSY;
3256 goto err_unlock;
3257 }
3258
3259 req = embedded_payload(wrb);
3260
3261 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3262 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
3263 wrb, NULL);
3264
3265 req->src_port = port_num;
3266 req->dest_port = port_num;
3267 req->loopback_type = loopback_type;
3268 req->loopback_state = enable;
3269
3270 status = be_mcc_notify(adapter);
3271 if (status)
3272 goto err_unlock;
3273
3274 mutex_unlock(&adapter->mcc_lock);
3275
3276 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
3277 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
3278 status = -ETIMEDOUT;
3279
3280 return status;
3281
3282 err_unlock:
3283 mutex_unlock(&adapter->mcc_lock);
3284 return status;
3285 }
3286
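/* Runs a FW-driven loopback test. The command is issued asynchronously;
 * the result is read from the WRB once et_cmd_compl is signalled,
 * presumably from the MCC completion path. hdr.timeout bounds the test
 * on the FW side (15s here), not the host-side wait.
 */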
3287 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
3288 u32 loopback_type, u32 pkt_size, u32 num_pkts,
3289 u64 pattern)
3290 {
3291 struct be_mcc_wrb *wrb;
3292 struct be_cmd_req_loopback_test *req;
3293 struct be_cmd_resp_loopback_test *resp;
3294 int status;
3295
3296 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
3297 CMD_SUBSYSTEM_LOWLEVEL))
3298 return -EPERM;
3299
3300 mutex_lock(&adapter->mcc_lock);
3301
3302 wrb = wrb_from_mccq(adapter);
3303 if (!wrb) {
3304 status = -EBUSY;
3305 goto err;
3306 }
3307
3308 req = embedded_payload(wrb);
3309
3310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3311 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
3312 NULL);
3313
3314 req->hdr.timeout = cpu_to_le32(15);
3315 req->pattern = cpu_to_le64(pattern);
3316 req->src_port = cpu_to_le32(port_num);
3317 req->dest_port = cpu_to_le32(port_num);
3318 req->pkt_size = cpu_to_le32(pkt_size);
3319 req->num_pkts = cpu_to_le32(num_pkts);
3320 req->loopback_type = cpu_to_le32(loopback_type);
3321
3322 status = be_mcc_notify(adapter);
3323 if (status)
3324 goto err;
3325
3326 mutex_unlock(&adapter->mcc_lock);
3327
3328 wait_for_completion(&adapter->et_cmd_compl);
3329 resp = embedded_payload(wrb);
3330 status = le32_to_cpu(resp->status);
3331
3332 return status;
3333 err:
3334 mutex_unlock(&adapter->mcc_lock);
3335 return status;
3336 }
3337
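/* DDR DMA test: fills the request buffer with the 64-bit pattern
 * repeated byte by byte, has the FW presumably round-trip it through
 * adapter DDR, and fails if the echoed rcv_buff differs or the FW
 * reports a send error.
 */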
3338 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
3339 u32 byte_cnt, struct be_dma_mem *cmd)
3340 {
3341 struct be_mcc_wrb *wrb;
3342 struct be_cmd_req_ddrdma_test *req;
3343 int status;
3344 int i, j = 0;
3345
3346 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
3347 CMD_SUBSYSTEM_LOWLEVEL))
3348 return -EPERM;
3349
3350 mutex_lock(&adapter->mcc_lock);
3351
3352 wrb = wrb_from_mccq(adapter);
3353 if (!wrb) {
3354 status = -EBUSY;
3355 goto err;
3356 }
3357 req = cmd->va;
3358 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3359 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
3360 cmd);
3361
3362 req->pattern = cpu_to_le64(pattern);
3363 req->byte_count = cpu_to_le32(byte_cnt);
3364 for (i = 0; i < byte_cnt; i++) {
3365 req->snd_buff[i] = (u8)(pattern >> (j*8));
3366 j++;
3367 if (j > 7)
3368 j = 0;
3369 }
3370
3371 status = be_mcc_notify_wait(adapter);
3372
3373 if (!status) {
3374 struct be_cmd_resp_ddrdma_test *resp;
3375
3376 resp = cmd->va;
3377 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
3378 resp->snd_err) {
3379 status = -1;
3380 }
3381 }
3382
3383 err:
3384 mutex_unlock(&adapter->mcc_lock);
3385 return status;
3386 }
3387
3388 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
3389 struct be_dma_mem *nonemb_cmd)
3390 {
3391 struct be_mcc_wrb *wrb;
3392 struct be_cmd_req_seeprom_read *req;
3393 int status;
3394
3395 mutex_lock(&adapter->mcc_lock);
3396
3397 wrb = wrb_from_mccq(adapter);
3398 if (!wrb) {
3399 status = -EBUSY;
3400 goto err;
3401 }
3402 req = nonemb_cmd->va;
3403
3404 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3405 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
3406 nonemb_cmd);
3407
3408 status = be_mcc_notify_wait(adapter);
3409
3410 err:
3411 mutex_unlock(&adapter->mcc_lock);
3412 return status;
3413 }
3414
3415 int be_cmd_get_phy_info(struct be_adapter *adapter)
3416 {
3417 struct be_mcc_wrb *wrb;
3418 struct be_cmd_req_get_phy_info *req;
3419 struct be_dma_mem cmd;
3420 int status;
3421
3422 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
3423 CMD_SUBSYSTEM_COMMON))
3424 return -EPERM;
3425
3426 mutex_lock(&adapter->mcc_lock);
3427
3428 wrb = wrb_from_mccq(adapter);
3429 if (!wrb) {
3430 status = -EBUSY;
3431 goto err;
3432 }
3433 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
3434 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3435 GFP_ATOMIC);
3436 if (!cmd.va) {
3437 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3438 status = -ENOMEM;
3439 goto err;
3440 }
3441
3442 req = cmd.va;
3443
3444 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3445 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
3446 wrb, &cmd);
3447
3448 status = be_mcc_notify_wait(adapter);
3449 if (!status) {
3450 struct be_phy_info *resp_phy_info =
3451 cmd.va + sizeof(struct be_cmd_req_hdr);
3452
3453 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
3454 adapter->phy.interface_type =
3455 le16_to_cpu(resp_phy_info->interface_type);
3456 adapter->phy.auto_speeds_supported =
3457 le16_to_cpu(resp_phy_info->auto_speeds_supported);
3458 adapter->phy.fixed_speeds_supported =
3459 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
3460 adapter->phy.misc_params =
3461 le32_to_cpu(resp_phy_info->misc_params);
3462
3463 if (BE2_chip(adapter)) {
3464 adapter->phy.fixed_speeds_supported =
3465 BE_SUPPORTED_SPEED_10GBPS |
3466 BE_SUPPORTED_SPEED_1GBPS;
3467 }
3468 }
3469 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3470 err:
3471 mutex_unlock(&adapter->mcc_lock);
3472 return status;
3473 }
3474
3475 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
3476 {
3477 struct be_mcc_wrb *wrb;
3478 struct be_cmd_req_set_qos *req;
3479 int status;
3480
3481 mutex_lock(&adapter->mcc_lock);
3482
3483 wrb = wrb_from_mccq(adapter);
3484 if (!wrb) {
3485 status = -EBUSY;
3486 goto err;
3487 }
3488
3489 req = embedded_payload(wrb);
3490
3491 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3492 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
3493
3494 req->hdr.domain = domain;
3495 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
3496 req->max_bps_nic = cpu_to_le32(bps);
3497
3498 status = be_mcc_notify_wait(adapter);
3499
3500 err:
3501 mutex_unlock(&adapter->mcc_lock);
3502 return status;
3503 }
3504
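/* Uses mbox. Reads controller attributes; only the low 16 bits of each
 * serial-number word are kept, and on BEx the PCI function number is
 * also read here as a workaround for the missing GET_FUNC_CONFIG.
 */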
3505 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3506 {
3507 struct be_mcc_wrb *wrb;
3508 struct be_cmd_req_cntl_attribs *req;
3509 struct be_cmd_resp_cntl_attribs *resp;
3510 int status, i;
3511 int payload_len = max(sizeof(*req), sizeof(*resp));
3512 struct mgmt_controller_attrib *attribs;
3513 struct be_dma_mem attribs_cmd;
3514 u32 *serial_num;
3515
3516 if (mutex_lock_interruptible(&adapter->mbox_lock))
3517 return -1;
3518
3519 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3520 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
3521 attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3522 attribs_cmd.size,
3523 &attribs_cmd.dma, GFP_ATOMIC);
3524 if (!attribs_cmd.va) {
3525 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3526 status = -ENOMEM;
3527 goto err;
3528 }
3529
3530 wrb = wrb_from_mbox(adapter);
3531 if (!wrb) {
3532 status = -EBUSY;
3533 goto err;
3534 }
3535 req = attribs_cmd.va;
3536
3537 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3538 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
3539 wrb, &attribs_cmd);
3540
3541 status = be_mbox_notify_wait(adapter);
3542 if (!status) {
3543 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
3544 adapter->hba_port_num = attribs->hba_attribs.phy_port;
3545 serial_num = attribs->hba_attribs.controller_serial_number;
3546 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
3547 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
3548 (BIT_MASK(16) - 1);
3549 /* For BEx, since GET_FUNC_CONFIG command is not
3550 * supported, we read funcnum here as a workaround.
3551 */
3552 if (BEx_chip(adapter))
3553 adapter->pf_num = attribs->hba_attribs.pci_funcnum;
3554 }
3555
3556 err:
3557 mutex_unlock(&adapter->mbox_lock);
3558 if (attribs_cmd.va)
3559 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
3560 attribs_cmd.va, attribs_cmd.dma);
3561 return status;
3562 }
3563
3564 /* Uses mbox */
3565 int be_cmd_req_native_mode(struct be_adapter *adapter)
3566 {
3567 struct be_mcc_wrb *wrb;
3568 struct be_cmd_req_set_func_cap *req;
3569 int status;
3570
3571 if (mutex_lock_interruptible(&adapter->mbox_lock))
3572 return -1;
3573
3574 wrb = wrb_from_mbox(adapter);
3575 if (!wrb) {
3576 status = -EBUSY;
3577 goto err;
3578 }
3579
3580 req = embedded_payload(wrb);
3581
3582 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3583 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
3584 sizeof(*req), wrb, NULL);
3585
3586 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
3587 CAPABILITY_BE3_NATIVE_ERX_API);
3588 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
3589
3590 status = be_mbox_notify_wait(adapter);
3591 if (!status) {
3592 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
3593
3594 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
3595 CAPABILITY_BE3_NATIVE_ERX_API;
3596 if (!adapter->be3_native)
3597 dev_warn(&adapter->pdev->dev,
3598 "adapter not in advanced mode\n");
3599 }
3600 err:
3601 mutex_unlock(&adapter->mbox_lock);
3602 return status;
3603 }
3604
3605 /* Get privilege(s) for a function */
3606 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
3607 u32 domain)
3608 {
3609 struct be_mcc_wrb *wrb;
3610 struct be_cmd_req_get_fn_privileges *req;
3611 int status;
3612
3613 mutex_lock(&adapter->mcc_lock);
3614
3615 wrb = wrb_from_mccq(adapter);
3616 if (!wrb) {
3617 status = -EBUSY;
3618 goto err;
3619 }
3620
3621 req = embedded_payload(wrb);
3622
3623 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3624 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
3625 wrb, NULL);
3626
3627 req->hdr.domain = domain;
3628
3629 status = be_mcc_notify_wait(adapter);
3630 if (!status) {
3631 struct be_cmd_resp_get_fn_privileges *resp =
3632 embedded_payload(wrb);
3633
3634 *privilege = le32_to_cpu(resp->privilege_mask);
3635
3636 /* In UMC mode FW does not return right privileges.
3637 * Override with correct privilege equivalent to PF.
3638 */
3639 if (BEx_chip(adapter) && be_is_mc(adapter) &&
3640 be_physfn(adapter))
3641 *privilege = MAX_PRIVILEGES;
3642 }
3643
3644 err:
3645 mutex_unlock(&adapter->mcc_lock);
3646 return status;
3647 }
3648
3649 /* Set privilege(s) for a function */
3650 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
3651 u32 domain)
3652 {
3653 struct be_mcc_wrb *wrb;
3654 struct be_cmd_req_set_fn_privileges *req;
3655 int status;
3656
3657 mutex_lock(&adapter->mcc_lock);
3658
3659 wrb = wrb_from_mccq(adapter);
3660 if (!wrb) {
3661 status = -EBUSY;
3662 goto err;
3663 }
3664
3665 req = embedded_payload(wrb);
3666 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3667 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3668 wrb, NULL);
3669 req->hdr.domain = domain;
3670 if (lancer_chip(adapter))
3671 req->privileges_lancer = cpu_to_le32(privileges);
3672 else
3673 req->privileges = cpu_to_le32(privileges);
3674
3675 status = be_mcc_notify_wait(adapter);
3676 err:
3677 mutex_unlock(&adapter->mcc_lock);
3678 return status;
3679 }
3680
3681 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
3682 * pmac_id_valid: false => pmac_id or MAC address is requested.
3683 * If pmac_id is returned, pmac_id_valid is returned as true
3684 */
3685 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3686 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3687 u8 domain)
3688 {
3689 struct be_mcc_wrb *wrb;
3690 struct be_cmd_req_get_mac_list *req;
3691 int status;
3692 int mac_count;
3693 struct be_dma_mem get_mac_list_cmd;
3694 int i;
3695
3696 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3697 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3698 get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3699 get_mac_list_cmd.size,
3700 &get_mac_list_cmd.dma,
3701 GFP_ATOMIC);
3702
3703 if (!get_mac_list_cmd.va) {
3704 dev_err(&adapter->pdev->dev,
3705 "Memory allocation failure during GET_MAC_LIST\n");
3706 return -ENOMEM;
3707 }
3708
3709 mutex_lock(&adapter->mcc_lock);
3710
3711 wrb = wrb_from_mccq(adapter);
3712 if (!wrb) {
3713 status = -EBUSY;
3714 goto out;
3715 }
3716
3717 req = get_mac_list_cmd.va;
3718
3719 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3720 OPCODE_COMMON_GET_MAC_LIST,
3721 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3722 req->hdr.domain = domain;
3723 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3724 if (*pmac_id_valid) {
3725 req->mac_id = cpu_to_le32(*pmac_id);
3726 req->iface_id = cpu_to_le16(if_handle);
3727 req->perm_override = 0;
3728 } else {
3729 req->perm_override = 1;
3730 }
3731
3732 status = be_mcc_notify_wait(adapter);
3733 if (!status) {
3734 struct be_cmd_resp_get_mac_list *resp =
3735 get_mac_list_cmd.va;
3736
3737 if (*pmac_id_valid) {
3738 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3739 ETH_ALEN);
3740 goto out;
3741 }
3742
3743 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3744 /* The returned MAC list may contain one or more active mac_ids and/or
3745  * one or more true/pseudo permanent MAC addresses.
3746  * If an active mac_id is present, return the first active mac_id
3747  * found.
3748  */
3749 for (i = 0; i < mac_count; i++) {
3750 struct get_list_macaddr *mac_entry;
3751 u16 mac_addr_size;
3752 u32 mac_id;
3753
3754 mac_entry = &resp->macaddr_list[i];
3755 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3756 /* A mac_id is a 32-bit value, whereas a MAC
3757  * address is 6 bytes long
3758  */
3759 if (mac_addr_size == sizeof(u32)) {
3760 *pmac_id_valid = true;
3761 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3762 *pmac_id = le32_to_cpu(mac_id);
3763 goto out;
3764 }
3765 }
3766 /* If no active mac_id is found, return the first MAC address */
3767 *pmac_id_valid = false;
3768 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3769 ETH_ALEN);
3770 }
3771
3772 out:
3773 mutex_unlock(&adapter->mcc_lock);
3774 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3775 get_mac_list_cmd.va, get_mac_list_cmd.dma);
3776 return status;
3777 }
3778
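/* Usage sketch for be_cmd_get_mac_from_list() (illustrative only; the
 * local variables below are hypothetical, not driver state):
 *
 *	bool pmac_valid = false;
 *	u32 pmac_id;
 *	u8 mac[ETH_ALEN];
 *	int status;
 *
 *	// Query mode: ask FW for an active pmac_id or a permanent MAC
 *	status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
 *					  &pmac_id, adapter->if_handle, 0);
 *
 *	// Lookup mode: pmac_valid is now true if an active pmac_id was
 *	// found; a second call fetches the MAC address behind that id
 *	if (!status && pmac_valid)
 *		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
 *						  &pmac_id,
 *						  adapter->if_handle, 0);
 */
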
3779 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3780 u8 *mac, u32 if_handle, bool active, u32 domain)
3781 {
3782 if (!active)
3783 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3784 if_handle, domain);
3785 if (BEx_chip(adapter))
3786 return be_cmd_mac_addr_query(adapter, mac, false,
3787 if_handle, curr_pmac_id);
3788 else
3789 /* Fetch the MAC address using pmac_id */
3790 return be_cmd_get_mac_from_list(adapter, mac, &active,
3791 &curr_pmac_id,
3792 if_handle, domain);
3793 }
3794
3795 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3796 {
3797 int status;
3798 bool pmac_valid = false;
3799
3800 eth_zero_addr(mac);
3801
3802 if (BEx_chip(adapter)) {
3803 if (be_physfn(adapter))
3804 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3805 0);
3806 else
3807 status = be_cmd_mac_addr_query(adapter, mac, false,
3808 adapter->if_handle, 0);
3809 } else {
3810 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3811 NULL, adapter->if_handle, 0);
3812 }
3813
3814 return status;
3815 }
3816
3817 /* Uses synchronous MCCQ */
3818 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3819 u8 mac_count, u32 domain)
3820 {
3821 struct be_mcc_wrb *wrb;
3822 struct be_cmd_req_set_mac_list *req;
3823 int status;
3824 struct be_dma_mem cmd;
3825
3826 memset(&cmd, 0, sizeof(struct be_dma_mem));
3827 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3828 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3829 GFP_KERNEL);
3830 if (!cmd.va)
3831 return -ENOMEM;
3832
3833 mutex_lock(&adapter->mcc_lock);
3834
3835 wrb = wrb_from_mccq(adapter);
3836 if (!wrb) {
3837 status = -EBUSY;
3838 goto err;
3839 }
3840
3841 req = cmd.va;
3842 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3843 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3844 wrb, &cmd);
3845
3846 req->hdr.domain = domain;
3847 req->mac_count = mac_count;
3848 if (mac_count)
3849 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3850
3851 status = be_mcc_notify_wait(adapter);
3852
3853 err:
3854 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3855 mutex_unlock(&adapter->mcc_lock);
3856 return status;
3857 }
3858
3859 /* Wrapper to delete any active MACs and provision the new MAC.
3860  * Changes to the MAC_LIST are allowed iff none of the MAC addresses in
3861  * the current list are active. See the call sketch after this function.
3862  */
3863 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3864 {
3865 bool active_mac = false;
3866 u8 old_mac[ETH_ALEN];
3867 u32 pmac_id;
3868 int status;
3869
3870 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3871 &pmac_id, if_id, dom);
3872
3873 if (!status && active_mac)
3874 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3875
3876 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3877 }
3878
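/* Call sketch for be_cmd_set_mac() (hypothetical caller, e.g. an
 * ndo_set_vf_mac-style path; vf_cfg and vf_num are illustrative):
 *
 *	// deletes any active MAC first, then provisions new_mac
 *	status = be_cmd_set_mac(adapter, new_mac, vf_cfg->if_handle,
 *				vf_num + 1);
 *
 *	// a NULL mac results in mac_count == 0, i.e. the list is cleared
 *	status = be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
 *				vf_num + 1);
 */
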
3879 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3880 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
3881 {
3882 struct be_mcc_wrb *wrb;
3883 struct be_cmd_req_set_hsw_config *req;
3884 void *ctxt;
3885 int status;
3886
3887 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
3888 CMD_SUBSYSTEM_COMMON))
3889 return -EPERM;
3890
3891 mutex_lock(&adapter->mcc_lock);
3892
3893 wrb = wrb_from_mccq(adapter);
3894 if (!wrb) {
3895 status = -EBUSY;
3896 goto err;
3897 }
3898
3899 req = embedded_payload(wrb);
3900 ctxt = &req->context;
3901
3902 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3903 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3904 NULL);
3905
3906 req->hdr.domain = domain;
3907 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3908 if (pvid) {
3909 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3910 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3911 }
3912 if (hsw_mode) {
3913 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3914 ctxt, adapter->hba_port_num);
3915 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3916 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3917 ctxt, hsw_mode);
3918 }
3919
3920 /* Enable both MAC and VLAN spoof checking when requested */
3921 if (!BEx_chip(adapter) && spoofchk) {
3922 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3923 ctxt, spoofchk);
3924 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3925 ctxt, spoofchk);
3926 }
3927
3928 be_dws_cpu_to_le(req->context, sizeof(req->context));
3929 status = be_mcc_notify_wait(adapter);
3930
3931 err:
3932 mutex_unlock(&adapter->mcc_lock);
3933 return status;
3934 }
3935
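/* Example use of be_cmd_set_hsw_config() (sketch only; pvid, vf_num and
 * spoofchk stand in for hypothetical caller state):
 *
 *	status = be_cmd_set_hsw_config(adapter, pvid, vf_num + 1,
 *				       vf_cfg->if_handle, 0, spoofchk);
 *
 * A pvid of 0 skips PVID programming and an hsw_mode of 0 leaves the
 * port forwarding type untouched, mirroring the checks above.
 */
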
3936 /* Get Hyper switch config */
3937 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3938 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
3939 {
3940 struct be_mcc_wrb *wrb;
3941 struct be_cmd_req_get_hsw_config *req;
3942 void *ctxt;
3943 int status;
3944 u16 vid;
3945
3946 mutex_lock(&adapter->mcc_lock);
3947
3948 wrb = wrb_from_mccq(adapter);
3949 if (!wrb) {
3950 status = -EBUSY;
3951 goto err;
3952 }
3953
3954 req = embedded_payload(wrb);
3955 ctxt = &req->context;
3956
3957 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3958 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3959 NULL);
3960
3961 req->hdr.domain = domain;
3962 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3963 ctxt, intf_id);
3964 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3965
3966 if (!BEx_chip(adapter) && mode) {
3967 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3968 ctxt, adapter->hba_port_num);
3969 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3970 }
3971 be_dws_cpu_to_le(req->context, sizeof(req->context));
3972
3973 status = be_mcc_notify_wait(adapter);
3974 if (!status) {
3975 struct be_cmd_resp_get_hsw_config *resp =
3976 embedded_payload(wrb);
3977
3978 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3979 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3980 pvid, &resp->context);
3981 if (pvid)
3982 *pvid = le16_to_cpu(vid);
3983 if (mode)
3984 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3985 port_fwd_type, &resp->context);
3986 if (spoofchk)
3987 *spoofchk =
3988 AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3989 spoofchk, &resp->context);
3990 }
3991
3992 err:
3993 mutex_unlock(&adapter->mcc_lock);
3994 return status;
3995 }
3996
3997 static bool be_is_wol_excluded(struct be_adapter *adapter)
3998 {
3999 struct pci_dev *pdev = adapter->pdev;
4000
4001 if (be_virtfn(adapter))
4002 return true;
4003
4004 switch (pdev->subsystem_device) {
4005 case OC_SUBSYS_DEVICE_ID1:
4006 case OC_SUBSYS_DEVICE_ID2:
4007 case OC_SUBSYS_DEVICE_ID3:
4008 case OC_SUBSYS_DEVICE_ID4:
4009 return true;
4010 default:
4011 return false;
4012 }
4013 }
4014
4015 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
4016 {
4017 struct be_mcc_wrb *wrb;
4018 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
4019 int status = 0;
4020 struct be_dma_mem cmd;
4021
4022 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
4023 CMD_SUBSYSTEM_ETH))
4024 return -EPERM;
4025
4026 if (be_is_wol_excluded(adapter))
4027 return status;
4028
4029 if (mutex_lock_interruptible(&adapter->mbox_lock))
4030 return -1;
4031
4032 memset(&cmd, 0, sizeof(struct be_dma_mem));
4033 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
4034 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4035 GFP_ATOMIC);
4036 if (!cmd.va) {
4037 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
4038 status = -ENOMEM;
4039 goto err;
4040 }
4041
4042 wrb = wrb_from_mbox(adapter);
4043 if (!wrb) {
4044 status = -EBUSY;
4045 goto err;
4046 }
4047
4048 req = cmd.va;
4049
4050 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
4051 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
4052 sizeof(*req), wrb, &cmd);
4053
4054 req->hdr.version = 1;
4055 req->query_options = BE_GET_WOL_CAP;
4056
4057 status = be_mbox_notify_wait(adapter);
4058 if (!status) {
4059 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
4060
4061 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
4062
4063 adapter->wol_cap = resp->wol_settings;
4064
4065 /* Non-zero macaddr indicates WOL is enabled */
4066 if (adapter->wol_cap & BE_WOL_CAP &&
4067 !is_zero_ether_addr(resp->magic_mac))
4068 adapter->wol_en = true;
4069 }
4070 err:
4071 mutex_unlock(&adapter->mbox_lock);
4072 if (cmd.va)
4073 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4074 cmd.dma);
4075 return status;
4076
4077 }
4078
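/* Sketch of how the capability cached above might be consumed by an
 * ethtool get_wol handler (illustrative; not this driver's actual
 * ethtool code):
 *
 *	if (adapter->wol_cap & BE_WOL_CAP)
 *		wol->supported = WAKE_MAGIC;
 *	wol->wolopts = adapter->wol_en ? WAKE_MAGIC : 0;
 */
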
4079 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
4080 {
4081 struct be_dma_mem extfat_cmd;
4082 struct be_fat_conf_params *cfgs;
4083 int status;
4084 int i, j;
4085
4086 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4087 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4088 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4089 extfat_cmd.size, &extfat_cmd.dma,
4090 GFP_ATOMIC);
4091 if (!extfat_cmd.va)
4092 return -ENOMEM;
4093
4094 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4095 if (status)
4096 goto err;
4097
4098 cfgs = (struct be_fat_conf_params *)
4099 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
4100 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
4101 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
4102
4103 for (j = 0; j < num_modes; j++) {
4104 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
4105 cfgs->module[i].trace_lvl[j].dbg_lvl =
4106 cpu_to_le32(level);
4107 }
4108 }
4109
4110 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
4111 err:
4112 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4113 extfat_cmd.dma);
4114 return status;
4115 }
4116
4117 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4118 {
4119 struct be_dma_mem extfat_cmd;
4120 struct be_fat_conf_params *cfgs;
4121 int status, j;
4122 int level = 0;
4123
4124 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4125 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4126 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4127 extfat_cmd.size, &extfat_cmd.dma,
4128 GFP_ATOMIC);
4129
4130 if (!extfat_cmd.va) {
4131 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4132 __func__);
4133 goto err;
4134 }
4135
4136 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4137 if (!status) {
4138 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4139 sizeof(struct be_cmd_resp_hdr));
4140
4141 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4142 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4143 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4144 }
4145 }
4146 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4147 extfat_cmd.dma);
4148 err:
4149 return level;
4150 }
4151
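/* The two helpers above form a get/set pair; a debug knob could use them
 * as follows (sketch; new_level is a hypothetical value):
 *
 *	if (be_cmd_get_fw_log_level(adapter) != new_level)
 *		status = be_cmd_set_fw_log_level(adapter, new_level);
 *
 * Both fetch the extended FAT capabilities first and only touch the
 * MODE_UART trace levels.
 */
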
4152 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
4153 struct be_dma_mem *cmd)
4154 {
4155 struct be_mcc_wrb *wrb;
4156 struct be_cmd_req_get_ext_fat_caps *req;
4157 int status;
4158
4159 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
4160 CMD_SUBSYSTEM_COMMON))
4161 return -EPERM;
4162
4163 if (mutex_lock_interruptible(&adapter->mbox_lock))
4164 return -1;
4165
4166 wrb = wrb_from_mbox(adapter);
4167 if (!wrb) {
4168 status = -EBUSY;
4169 goto err;
4170 }
4171
4172 req = cmd->va;
4173 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4174 OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
4175 cmd->size, wrb, cmd);
4176 req->parameter_type = cpu_to_le32(1);
4177
4178 status = be_mbox_notify_wait(adapter);
4179 err:
4180 mutex_unlock(&adapter->mbox_lock);
4181 return status;
4182 }
4183
4184 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
4185 struct be_dma_mem *cmd,
4186 struct be_fat_conf_params *configs)
4187 {
4188 struct be_mcc_wrb *wrb;
4189 struct be_cmd_req_set_ext_fat_caps *req;
4190 int status;
4191
4192 mutex_lock(&adapter->mcc_lock);
4193
4194 wrb = wrb_from_mccq(adapter);
4195 if (!wrb) {
4196 status = -EBUSY;
4197 goto err;
4198 }
4199
4200 req = cmd->va;
4201 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
4202 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4203 OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES,
4204 cmd->size, wrb, cmd);
4205
4206 status = be_mcc_notify_wait(adapter);
4207 err:
4208 mutex_unlock(&adapter->mcc_lock);
4209 return status;
4210 }
4211
4212 int be_cmd_query_port_name(struct be_adapter *adapter)
4213 {
4214 struct be_cmd_req_get_port_name *req;
4215 struct be_mcc_wrb *wrb;
4216 int status;
4217
4218 if (mutex_lock_interruptible(&adapter->mbox_lock))
4219 return -1;
4220
4221 wrb = wrb_from_mbox(adapter);
4222 req = embedded_payload(wrb);
4223
4224 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4225 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
4226 NULL);
4227 if (!BEx_chip(adapter))
4228 req->hdr.version = 1;
4229
4230 status = be_mbox_notify_wait(adapter);
4231 if (!status) {
4232 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
4233
4234 adapter->port_name = resp->port_name[adapter->hba_port_num];
4235 } else {
4236 adapter->port_name = adapter->hba_port_num + '0';
4237 }
4238
4239 mutex_unlock(&adapter->mbox_lock);
4240 return status;
4241 }
4242
4243 /* When more than one NIC descriptor is present in the descriptor list,
4244  * the caller must specify the pf_num to obtain the NIC descriptor
4245  * corresponding to its PCI function.
4246  * get_vft must be true when the caller wants the VF-template desc of the
4247  * PF-pool.
4248  * pf_num should be set to PF_NUM_IGNORE when the caller knows that only
4249  * its own NIC descriptor is present in the descriptor list.
4250  */
4251 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
4252 bool get_vft, u8 pf_num)
4253 {
4254 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4255 struct be_nic_res_desc *nic;
4256 int i;
4257
4258 for (i = 0; i < desc_count; i++) {
4259 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
4260 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
4261 nic = (struct be_nic_res_desc *)hdr;
4262
4263 if ((pf_num == PF_NUM_IGNORE ||
4264 nic->pf_num == pf_num) &&
4265 (!get_vft || nic->flags & BIT(VFT_SHIFT)))
4266 return nic;
4267 }
4268 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4269 hdr = (void *)hdr + hdr->desc_len;
4270 }
4271 return NULL;
4272 }
4273
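/* The buffer walked above is a packed sequence of variable-length
 * resource descriptors, roughly:
 *
 *	+----------------+----------------+----------------+----
 *	| pcie desc      | nic desc       | port desc      | ...
 *	| hdr.desc_len   | hdr.desc_len   | hdr.desc_len   |
 *	+----------------+----------------+----------------+----
 *
 * Each iteration advances by hdr->desc_len; a zero desc_len from older
 * FW is normalized to RESOURCE_DESC_SIZE_V0 so the walk always makes
 * forward progress.
 */
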
4274 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
4275 u8 pf_num)
4276 {
4277 return be_get_nic_desc(buf, desc_count, true, pf_num);
4278 }
4279
4280 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
4281 u8 pf_num)
4282 {
4283 return be_get_nic_desc(buf, desc_count, false, pf_num);
4284 }
4285
4286 static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
4287 u8 pf_num)
4288 {
4289 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4290 struct be_pcie_res_desc *pcie;
4291 int i;
4292
4293 for (i = 0; i < desc_count; i++) {
4294 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4295 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4296 pcie = (struct be_pcie_res_desc *)hdr;
4297 if (pcie->pf_num == pf_num)
4298 return pcie;
4299 }
4300
4301 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4302 hdr = (void *)hdr + hdr->desc_len;
4303 }
4304 return NULL;
4305 }
4306
4307 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
4308 {
4309 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4310 int i;
4311
4312 for (i = 0; i < desc_count; i++) {
4313 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
4314 return (struct be_port_res_desc *)hdr;
4315
4316 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4317 hdr = (void *)hdr + hdr->desc_len;
4318 }
4319 return NULL;
4320 }
4321
4322 static void be_copy_nic_desc(struct be_resources *res,
4323 struct be_nic_res_desc *desc)
4324 {
4325 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
4326 res->max_vlans = le16_to_cpu(desc->vlan_count);
4327 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
4328 res->max_tx_qs = le16_to_cpu(desc->txq_count);
4329 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
4330 res->max_rx_qs = le16_to_cpu(desc->rq_count);
4331 res->max_evt_qs = le16_to_cpu(desc->eq_count);
4332 res->max_cq_count = le16_to_cpu(desc->cq_count);
4333 res->max_iface_count = le16_to_cpu(desc->iface_count);
4334 res->max_mcc_count = le16_to_cpu(desc->mcc_count);
4335 /* Clear flags that driver is not interested in */
4336 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
4337 BE_IF_CAP_FLAGS_WANT;
4338 }
4339
4340 /* Uses Mbox */
4341 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
4342 {
4343 struct be_mcc_wrb *wrb;
4344 struct be_cmd_req_get_func_config *req;
4345 int status;
4346 struct be_dma_mem cmd;
4347
4348 if (mutex_lock_interruptible(&adapter->mbox_lock))
4349 return -1;
4350
4351 memset(&cmd, 0, sizeof(struct be_dma_mem));
4352 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
4353 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4354 GFP_ATOMIC);
4355 if (!cmd.va) {
4356 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
4357 status = -ENOMEM;
4358 goto err;
4359 }
4360
4361 wrb = wrb_from_mbox(adapter);
4362 if (!wrb) {
4363 status = -EBUSY;
4364 goto err;
4365 }
4366
4367 req = cmd.va;
4368
4369 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4370 OPCODE_COMMON_GET_FUNC_CONFIG,
4371 cmd.size, wrb, &cmd);
4372
4373 if (skyhawk_chip(adapter))
4374 req->hdr.version = 1;
4375
4376 status = be_mbox_notify_wait(adapter);
4377 if (!status) {
4378 struct be_cmd_resp_get_func_config *resp = cmd.va;
4379 u32 desc_count = le32_to_cpu(resp->desc_count);
4380 struct be_nic_res_desc *desc;
4381
4382 /* GET_FUNC_CONFIG returns resource descriptors of the
4383 * current function only. So, pf_num should be set to
4384 * PF_NUM_IGNORE.
4385 */
4386 desc = be_get_func_nic_desc(resp->func_param, desc_count,
4387 PF_NUM_IGNORE);
4388 if (!desc) {
4389 status = -EINVAL;
4390 goto err;
4391 }
4392
4393 /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
4394 adapter->pf_num = desc->pf_num;
4395 adapter->vf_num = desc->vf_num;
4396
4397 if (res)
4398 be_copy_nic_desc(res, desc);
4399 }
4400 err:
4401 mutex_unlock(&adapter->mbox_lock);
4402 if (cmd.va)
4403 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4404 cmd.dma);
4405 return status;
4406 }
4407
4408 /* Fills nic_pf_nums with all NIC PF_nums in the adapter; returns the count */
4409 static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
4410 {
4411 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4412 struct be_pcie_res_desc *pcie = NULL;
4413 int i;
4414 u16 nic_pf_count = 0;
4415
4416 for (i = 0; i < desc_count; i++) {
4417 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4418 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4419 pcie = (struct be_pcie_res_desc *)hdr;
4420 if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
4421 pcie->pf_type == MISSION_RDMA)) {
4422 nic_pf_nums[nic_pf_count++] = pcie->pf_num;
4423 }
4424 }
4425
4426 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4427 hdr = (void *)hdr + hdr->desc_len;
4428 }
4429 return nic_pf_count;
4430 }
4431
4432 /* Will use MBOX only if MCCQ has not been created */
4433 int be_cmd_get_profile_config(struct be_adapter *adapter,
4434 struct be_resources *res,
4435 struct be_port_resources *port_res,
4436 u8 profile_type, u8 query, u8 domain)
4437 {
4438 struct be_cmd_resp_get_profile_config *resp;
4439 struct be_cmd_req_get_profile_config *req;
4440 struct be_nic_res_desc *vf_res;
4441 struct be_pcie_res_desc *pcie;
4442 struct be_port_res_desc *port;
4443 struct be_nic_res_desc *nic;
4444 struct be_mcc_wrb wrb = {0};
4445 struct be_dma_mem cmd;
4446 u16 desc_count;
4447 int status;
4448
4449 memset(&cmd, 0, sizeof(struct be_dma_mem));
4450 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
4451 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4452 GFP_ATOMIC);
4453 if (!cmd.va)
4454 return -ENOMEM;
4455
4456 req = cmd.va;
4457 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4458 OPCODE_COMMON_GET_PROFILE_CONFIG,
4459 cmd.size, &wrb, &cmd);
4460
4461 if (!lancer_chip(adapter))
4462 req->hdr.version = 1;
4463 req->type = profile_type;
4464 req->hdr.domain = domain;
4465
4466 /* When the QUERY_MODIFIABLE_FIELDS_TYPE bit is set, the cmd returns
4467  * descriptors in which all bits are set to "1" for the fields that can
4468  * be modified using the SET_PROFILE_CONFIG cmd.
4469  */
4470 if (query == RESOURCE_MODIFIABLE)
4471 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
4472
4473 status = be_cmd_notify_wait(adapter, &wrb);
4474 if (status)
4475 goto err;
4476
4477 resp = cmd.va;
4478 desc_count = le16_to_cpu(resp->desc_count);
4479
4480 if (port_res) {
4481 u16 nic_pf_cnt = 0, i;
4482 u16 nic_pf_num_list[MAX_NIC_FUNCS];
4483
4484 nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
4485 desc_count,
4486 nic_pf_num_list);
4487
4488 for (i = 0; i < nic_pf_cnt; i++) {
4489 nic = be_get_func_nic_desc(resp->func_param, desc_count,
4490 nic_pf_num_list[i]);
4491 /* be_get_*_desc() may return NULL; guard both lookups */
4492 if (nic && nic->link_param == adapter->port_num) {
4493 port_res->nic_pfs++;
4494 pcie = be_get_pcie_desc(resp->func_param, desc_count,
4495 nic_pf_num_list[i]);
4496 port_res->max_vfs += pcie ? le16_to_cpu(pcie->num_vfs) : 0;
4497 }
4498 }
4499 goto err;
4500 }
4501
4502 pcie = be_get_pcie_desc(resp->func_param, desc_count,
4503 adapter->pf_num);
4504 if (pcie)
4505 res->max_vfs = le16_to_cpu(pcie->num_vfs);
4506
4507 port = be_get_port_desc(resp->func_param, desc_count);
4508 if (port)
4509 adapter->mc_type = port->mc_type;
4510
4511 nic = be_get_func_nic_desc(resp->func_param, desc_count,
4512 adapter->pf_num);
4513 if (nic)
4514 be_copy_nic_desc(res, nic);
4515
4516 vf_res = be_get_vft_desc(resp->func_param, desc_count,
4517 adapter->pf_num);
4518 if (vf_res)
4519 res->vf_if_cap_flags = vf_res->cap_flags;
4520 err:
4521 if (cmd.va)
4522 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4523 cmd.dma);
4524 return status;
4525 }
4526
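/* Typical queries (sketch; the profile_type and query values are the
 * ones defined in be_cmds.h):
 *
 *	// resource limits of this function in the active profile
 *	status = be_cmd_get_profile_config(adapter, &res, NULL,
 *					   ACTIVE_PROFILE_TYPE,
 *					   RESOURCE_LIMITS, 0);
 *
 *	// per-port totals (nic_pfs/max_vfs) instead of per-function res
 *	status = be_cmd_get_profile_config(adapter, NULL, &port_res,
 *					   SAVED_PROFILE_TYPE,
 *					   RESOURCE_LIMITS, 0);
 */
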
4527 /* Will use MBOX only if MCCQ has not been created */
4528 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4529 int size, int count, u8 version, u8 domain)
4530 {
4531 struct be_cmd_req_set_profile_config *req;
4532 struct be_mcc_wrb wrb = {0};
4533 struct be_dma_mem cmd;
4534 int status;
4535
4536 memset(&cmd, 0, sizeof(struct be_dma_mem));
4537 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
4538 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4539 GFP_ATOMIC);
4540 if (!cmd.va)
4541 return -ENOMEM;
4542
4543 req = cmd.va;
4544 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4545 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
4546 &wrb, &cmd);
4547 req->hdr.version = version;
4548 req->hdr.domain = domain;
4549 req->desc_count = cpu_to_le32(count);
4550 memcpy(req->desc, desc, size);
4551
4552 status = be_cmd_notify_wait(adapter, &wrb);
4553
4554 if (cmd.va)
4555 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4556 cmd.dma);
4557 return status;
4558 }
4559
4560 /* Mark all fields invalid */
4561 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
4562 {
4563 memset(nic, 0, sizeof(*nic));
4564 nic->unicast_mac_count = 0xFFFF;
4565 nic->mcc_count = 0xFFFF;
4566 nic->vlan_count = 0xFFFF;
4567 nic->mcast_mac_count = 0xFFFF;
4568 nic->txq_count = 0xFFFF;
4569 nic->rq_count = 0xFFFF;
4570 nic->rssq_count = 0xFFFF;
4571 nic->lro_count = 0xFFFF;
4572 nic->cq_count = 0xFFFF;
4573 nic->toe_conn_count = 0xFFFF;
4574 nic->eq_count = 0xFFFF;
4575 nic->iface_count = 0xFFFF;
4576 nic->link_param = 0xFF;
4577 nic->channel_id_param = cpu_to_le16(0xF000);
4578 nic->acpi_params = 0xFF;
4579 nic->wol_param = 0x0F;
4580 nic->tunnel_iface_count = 0xFFFF;
4581 nic->direct_tenant_iface_count = 0xFFFF;
4582 nic->bw_min = 0xFFFFFFFF;
4583 nic->bw_max = 0xFFFFFFFF;
4584 }
4585
4586 /* Mark all fields invalid */
4587 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
4588 {
4589 memset(pcie, 0, sizeof(*pcie));
4590 pcie->sriov_state = 0xFF;
4591 pcie->pf_state = 0xFF;
4592 pcie->pf_type = 0xFF;
4593 pcie->num_vfs = 0xFFFF;
4594 }
4595
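/* SET_PROFILE_CONFIG treats an all-ones field as invalid, i.e. "do not
 * modify", so the reset helpers above pre-mark every field and callers
 * overwrite only what they mean to change, e.g. (sketch):
 *
 *	be_reset_nic_desc(&nic_desc);
 *	nic_desc.pf_num = adapter->pf_num;		// modified
 *	nic_desc.bw_max = cpu_to_le32(bw_percent);	// modified
 *	// all other fields stay 0xFF.. and are left unchanged by FW
 */
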
4596 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
4597 u8 domain)
4598 {
4599 struct be_nic_res_desc nic_desc;
4600 u32 bw_percent;
4601 u16 version = 0;
4602
4603 if (BE3_chip(adapter))
4604 return be_cmd_set_qos(adapter, max_rate / 10, domain);
4605
4606 be_reset_nic_desc(&nic_desc);
4607 nic_desc.pf_num = adapter->pf_num;
4608 nic_desc.vf_num = domain;
4609 nic_desc.bw_min = 0;
4610 if (lancer_chip(adapter)) {
4611 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
4612 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
4613 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
4614 (1 << NOSV_SHIFT);
4615 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
4616 } else {
4617 version = 1;
4618 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4619 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4620 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4621 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
4622 nic_desc.bw_max = cpu_to_le32(bw_percent);
4623 }
4624
4625 return be_cmd_set_profile_config(adapter, &nic_desc,
4626 nic_desc.hdr.desc_len,
4627 1, version, domain);
4628 }
4629
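/* Worked example of the bw_percent math above: max_rate = 2500 Mbps on a
 * link_speed of 10000 Mbps gives (2500 * 100) / 10000 = 25, i.e. a 25%
 * bandwidth cap; max_rate = 0 maps to 100% (no cap).
 */
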
4630 int be_cmd_set_sriov_config(struct be_adapter *adapter,
4631 struct be_resources pool_res, u16 num_vfs,
4632 struct be_resources *vft_res)
4633 {
4634 struct {
4635 struct be_pcie_res_desc pcie;
4636 struct be_nic_res_desc nic_vft;
4637 } __packed desc;
4638
4639 /* PF PCIE descriptor */
4640 be_reset_pcie_desc(&desc.pcie);
4641 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
4642 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4643 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4644 desc.pcie.pf_num = adapter->pdev->devfn;
4645 desc.pcie.sriov_state = num_vfs ? 1 : 0;
4646 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
4647
4648 /* VF NIC Template descriptor */
4649 be_reset_nic_desc(&desc.nic_vft);
4650 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4651 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4652 desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
4653 BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4654 desc.nic_vft.pf_num = adapter->pdev->devfn;
4655 desc.nic_vft.vf_num = 0;
4656 desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
4657 desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
4658 desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
4659 desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
4660 desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
4661
4662 if (vft_res->max_uc_mac)
4663 desc.nic_vft.unicast_mac_count =
4664 cpu_to_le16(vft_res->max_uc_mac);
4665 if (vft_res->max_vlans)
4666 desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
4667 if (vft_res->max_iface_count)
4668 desc.nic_vft.iface_count =
4669 cpu_to_le16(vft_res->max_iface_count);
4670 if (vft_res->max_mcc_count)
4671 desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
4672
4673 return be_cmd_set_profile_config(adapter, &desc,
4674 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
4675 }
4676
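/* Both descriptors above travel in a single SET_PROFILE_CONFIG request
 * (count = 2, version = 1), so the PF's SR-IOV state and the VF resource
 * template are programmed together.
 */
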
4677 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
4678 {
4679 struct be_mcc_wrb *wrb;
4680 struct be_cmd_req_manage_iface_filters *req;
4681 int status;
4682
4683 if (iface == 0xFFFFFFFF)
4684 return -1;
4685
4686 mutex_lock(&adapter->mcc_lock);
4687
4688 wrb = wrb_from_mccq(adapter);
4689 if (!wrb) {
4690 status = -EBUSY;
4691 goto err;
4692 }
4693 req = embedded_payload(wrb);
4694
4695 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4696 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
4697 wrb, NULL);
4698 req->op = op;
4699 req->target_iface_id = cpu_to_le32(iface);
4700
4701 status = be_mcc_notify_wait(adapter);
4702 err:
4703 mutex_unlock(&adapter->mcc_lock);
4704 return status;
4705 }
4706
4707 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
4708 {
4709 struct be_port_res_desc port_desc;
4710
4711 memset(&port_desc, 0, sizeof(port_desc));
4712 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
4713 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4714 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4715 port_desc.link_num = adapter->hba_port_num;
4716 if (port) {
4717 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
4718 (1 << RCVID_SHIFT);
4719 port_desc.nv_port = swab16(port);
4720 } else {
4721 port_desc.nv_flags = NV_TYPE_DISABLED;
4722 port_desc.nv_port = 0;
4723 }
4724
4725 return be_cmd_set_profile_config(adapter, &port_desc,
4726 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
4727 }
4728
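/* Caller sketch (hypothetical UDP-tunnel notifier; the port value is
 * illustrative):
 *
 *	// enable VXLAN offload on the IANA-assigned port 4789
 *	status = be_cmd_set_vxlan_port(adapter, htons(4789));
 *
 *	// a port of 0 selects NV_TYPE_DISABLED, turning offload off
 *	status = be_cmd_set_vxlan_port(adapter, 0);
 */
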
4729 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
4730 int vf_num)
4731 {
4732 struct be_mcc_wrb *wrb;
4733 struct be_cmd_req_get_iface_list *req;
4734 struct be_cmd_resp_get_iface_list *resp;
4735 int status;
4736
4737 mutex_lock(&adapter->mcc_lock);
4738
4739 wrb = wrb_from_mccq(adapter);
4740 if (!wrb) {
4741 status = -EBUSY;
4742 goto err;
4743 }
4744 req = embedded_payload(wrb);
4745
4746 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4747 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
4748 wrb, NULL);
4749 req->hdr.domain = vf_num + 1;
4750
4751 status = be_mcc_notify_wait(adapter);
4752 if (!status) {
4753 resp = (struct be_cmd_resp_get_iface_list *)req;
4754 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
4755 }
4756
4757 err:
4758 mutex_unlock(&adapter->mcc_lock);
4759 return status;
4760 }
4761
4762 static int lancer_wait_idle(struct be_adapter *adapter)
4763 {
4764 #define SLIPORT_IDLE_TIMEOUT 30
4765 u32 reg_val;
4766 int status = 0, i;
4767
4768 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4769 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4770 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4771 break;
4772
4773 ssleep(1);
4774 }
4775
4776 if (i == SLIPORT_IDLE_TIMEOUT)
4777 status = -1;
4778
4779 return status;
4780 }
4781
4782 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4783 {
4784 int status = 0;
4785
4786 status = lancer_wait_idle(adapter);
4787 if (status)
4788 return status;
4789
4790 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4791
4792 return status;
4793 }
4794
4795 /* Check whether a diagnostic dump image is present */
4796 bool dump_present(struct be_adapter *adapter)
4797 {
4798 u32 sliport_status = 0;
4799
4800 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4801 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4802 }
4803
4804 int lancer_initiate_dump(struct be_adapter *adapter)
4805 {
4806 struct device *dev = &adapter->pdev->dev;
4807 int status;
4808
4809 if (dump_present(adapter)) {
4810 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4811 return -EEXIST;
4812 }
4813
4814 /* trigger a firmware reset and request a diagnostic dump */
4815 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4816 PHYSDEV_CONTROL_DD_MASK);
4817 if (status < 0) {
4818 dev_err(dev, "FW reset failed\n");
4819 return status;
4820 }
4821
4822 status = lancer_wait_idle(adapter);
4823 if (status)
4824 return status;
4825
4826 if (!dump_present(adapter)) {
4827 dev_err(dev, "FW dump not generated\n");
4828 return -EIO;
4829 }
4830
4831 return 0;
4832 }
4833
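/* Dump lifecycle sketch: a diagnostic path would typically call
 * lancer_initiate_dump(), read out LANCER_FW_DUMP_FILE, and then call
 * lancer_delete_dump() so dump_present() reports false before the next
 * capture (the wiring shown here is illustrative, not driver code).
 */
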
4834 int lancer_delete_dump(struct be_adapter *adapter)
4835 {
4836 int status;
4837
4838 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4839 return be_cmd_status(status);
4840 }
4841
4842 /* Uses sync mcc */
4843 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4844 {
4845 struct be_mcc_wrb *wrb;
4846 struct be_cmd_enable_disable_vf *req;
4847 int status;
4848
4849 if (BEx_chip(adapter))
4850 return 0;
4851
4852 mutex_lock(&adapter->mcc_lock);
4853
4854 wrb = wrb_from_mccq(adapter);
4855 if (!wrb) {
4856 status = -EBUSY;
4857 goto err;
4858 }
4859
4860 req = embedded_payload(wrb);
4861
4862 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4863 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4864 wrb, NULL);
4865
4866 req->hdr.domain = domain;
4867 req->enable = 1;
4868 status = be_mcc_notify_wait(adapter);
4869 err:
4870 mutex_unlock(&adapter->mcc_lock);
4871 return status;
4872 }
4873
4874 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4875 {
4876 struct be_mcc_wrb *wrb;
4877 struct be_cmd_req_intr_set *req;
4878 int status;
4879
4880 if (mutex_lock_interruptible(&adapter->mbox_lock))
4881 return -1;
4882
4883 wrb = wrb_from_mbox(adapter);
4884
4885 req = embedded_payload(wrb);
4886
4887 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4888 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4889 wrb, NULL);
4890
4891 req->intr_enabled = intr_enable;
4892
4893 status = be_mbox_notify_wait(adapter);
4894
4895 mutex_unlock(&adapter->mbox_lock);
4896 return status;
4897 }
4898
4899 /* Uses MBOX */
4900 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4901 {
4902 struct be_cmd_req_get_active_profile *req;
4903 struct be_mcc_wrb *wrb;
4904 int status;
4905
4906 if (mutex_lock_interruptible(&adapter->mbox_lock))
4907 return -1;
4908
4909 wrb = wrb_from_mbox(adapter);
4910 if (!wrb) {
4911 status = -EBUSY;
4912 goto err;
4913 }
4914
4915 req = embedded_payload(wrb);
4916
4917 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4918 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4919 wrb, NULL);
4920
4921 status = be_mbox_notify_wait(adapter);
4922 if (!status) {
4923 struct be_cmd_resp_get_active_profile *resp =
4924 embedded_payload(wrb);
4925
4926 *profile_id = le16_to_cpu(resp->active_profile_id);
4927 }
4928
4929 err:
4930 mutex_unlock(&adapter->mbox_lock);
4931 return status;
4932 }
4933
4934 static int
4935 __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4936 int link_state, int version, u8 domain)
4937 {
4938 struct be_cmd_req_set_ll_link *req;
4939 struct be_mcc_wrb *wrb;
4940 u32 link_config = 0;
4941 int status;
4942
4943 mutex_lock(&adapter->mcc_lock);
4944
4945 wrb = wrb_from_mccq(adapter);
4946 if (!wrb) {
4947 status = -EBUSY;
4948 goto err;
4949 }
4950
4951 req = embedded_payload(wrb);
4952
4953 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4954 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4955 sizeof(*req), wrb, NULL);
4956
4957 req->hdr.version = version;
4958 req->hdr.domain = domain;
4959
4960 if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4961 link_state == IFLA_VF_LINK_STATE_AUTO)
4962 link_config |= PLINK_ENABLE;
4963
4964 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4965 link_config |= PLINK_TRACK;
4966
4967 req->link_config = cpu_to_le32(link_config);
4968
4969 status = be_mcc_notify_wait(adapter);
4970 err:
4971 mutex_unlock(&adapter->mcc_lock);
4972 return status;
4973 }
4974
4975 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4976 int link_state, u8 domain)
4977 {
4978 int status;
4979
4980 if (BE2_chip(adapter))
4981 return -EOPNOTSUPP;
4982
4983 status = __be_cmd_set_logical_link_config(adapter, link_state,
4984 2, domain);
4985
4986 /* Version 2 of the command is not recognized by older FW.
4987  * On such a failure, fall back to version 1 of the command.
4988  */
4989 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
4990 status = __be_cmd_set_logical_link_config(adapter, link_state,
4991 1, domain);
4992 return status;
4993 }
4994
4995 int be_cmd_set_features(struct be_adapter *adapter)
4996 {
4997 struct be_cmd_resp_set_features *resp;
4998 struct be_cmd_req_set_features *req;
4999 struct be_mcc_wrb *wrb;
5000 int status;
5001
5002 if (mutex_lock_interruptible(&adapter->mcc_lock))
5003 return -1;
5004
5005 wrb = wrb_from_mccq(adapter);
5006 if (!wrb) {
5007 status = -EBUSY;
5008 goto err;
5009 }
5010
5011 req = embedded_payload(wrb);
5012
5013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
5014 OPCODE_COMMON_SET_FEATURES,
5015 sizeof(*req), wrb, NULL);
5016
5017 req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
5018 req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
5019 req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
5020
5021 status = be_mcc_notify_wait(adapter);
5022 if (status)
5023 goto err;
5024
5025 resp = embedded_payload(wrb);
5026
5027 adapter->error_recovery.ue_to_poll_time =
5028 le16_to_cpu(resp->parameter.resp.ue2rp);
5029 adapter->error_recovery.ue_to_reset_time =
5030 le16_to_cpu(resp->parameter.resp.ue2sr);
5031 adapter->error_recovery.recovery_supported = true;
5032 err:
5033 /* Also check for MCC_STATUS_INVALID_LENGTH: older Skyhawk FW
5034  * versions return that error instead
5035  */
5036 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
5037 base_status(status) == MCC_STATUS_INVALID_LENGTH)
5038 dev_info(&adapter->pdev->dev,
5039 "Adapter does not support HW error recovery\n");
5040
5041 mutex_unlock(&adapter->mcc_lock);
5042 return status;
5043 }
5044
5045 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
5046 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
5047 {
5048 struct be_adapter *adapter = netdev_priv(netdev_handle);
5049 struct be_mcc_wrb *wrb;
5050 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
5051 struct be_cmd_req_hdr *req;
5052 struct be_cmd_resp_hdr *resp;
5053 int status;
5054
5055 mutex_lock(&adapter->mcc_lock);
5056
5057 wrb = wrb_from_mccq(adapter);
5058 if (!wrb) {
5059 status = -EBUSY;
5060 goto err;
5061 }
5062 req = embedded_payload(wrb);
5063 resp = embedded_payload(wrb);
5064
5065 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
5066 hdr->opcode, wrb_payload_size, wrb, NULL);
5067 memcpy(req, wrb_payload, wrb_payload_size);
5068 be_dws_cpu_to_le(req, wrb_payload_size);
5069
5070 status = be_mcc_notify_wait(adapter);
5071 if (cmd_status)
5072 *cmd_status = (status & 0xffff);
5073 if (ext_status)
5074 *ext_status = 0;
5075 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
5076 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
5077 err:
5078 mutex_unlock(&adapter->mcc_lock);
5079 return status;
5080 }
5081 EXPORT_SYMBOL(be_roce_mcc_cmd);
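
/* Illustrative call from the RoCE driver side (hypothetical caller; the
 * payload must begin with a fully formed struct be_cmd_req_hdr):
 *
 *	u16 cmd_status, ext_status;
 *
 *	status = be_roce_mcc_cmd(netdev, &roce_req, sizeof(roce_req),
 *				 &cmd_status, &ext_status);
 */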
5082