/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
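
/*
 * These templates are initialized once by lpfc_wqe_cmd_template() below.
 * Per-I/O setup can then start from a straight copy of the appropriate
 * template and patch only the words marked "variable" in the comments
 * that follow.
 */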

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
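
/*
 * Illustration only (not part of the driver API): per-I/O prep code is
 * expected to start from one of the templates above and patch just the
 * variable words, roughly as sketched below. The exact fields patched
 * per I/O are assumptions here; see the FCP/NVMe prep routines for the
 * authoritative sequence.
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, pwqeq->iotag);
 *	bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, pwqeq->sli4_xritag);
 */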
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;
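
	/*
	 * On 64-bit little-endian hosts the queue entries are already in
	 * SLI byte order, so a plain 64-bit copy suffices; other configs
	 * fall back to the word-swapping lpfc_sli_pcimem_bcopy() via the
	 * #else branch below.
	 */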
	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, or -EBUSY if the queue is full of entries the HBA has not yet
 * consumed.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the HBA has not yet consumed the next entry the queue is full */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
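	/* The pre-increment index is used by the LIST_FORMAT doorbell below */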

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the HBA has not yet consumed the next entry the queue is full */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
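	/*
	 * Without EQE autovalid (eqav) the host must clear each entry's
	 * valid bit by hand; with eqav the hardware rewrites entries, so
	 * the expected valid sense simply toggles on every index wrap.
	 */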
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

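	/*
	 * Claim the EQ atomically; if another context already owns it,
	 * skip processing and just write the doorbell on the way out.
	 */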
	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
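	/*
	 * Same valid-bit scheme as the EQ: without CQE autovalid (cqav)
	 * the host clears the valid bit; with cqav the expected valid
	 * sense toggles each time the host index wraps.
	 */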
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe to the next available entry
 * on the header receive queue @hq and @drqe to the data receive queue @dq.
 * It will then ring the Receive Queue Doorbell to signal the HBA to start
 * processing the Receive Queue Entries. This function returns the index that
 * the rqe was copied to if successful, or -EBUSY if the queues are full of
 * entries the HBA has not yet processed.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
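	/* Header and data RQs are paired; both must post at the same index */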
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the HBA has not yet consumed the next entry the queue is full */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring the header RQ doorbell once every notify_interval postings */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited R_A_TOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock internally. It checks whether
 * stop_time (one R_A_TOV after the rrq was set active) has been
 * reached for each active rrq; if it has and the send_rrq flag is
 * set then it will call lpfc_send_rrq. If the send_rrq flag is not
 * set then it will just call the routine to clear the rrq and free
 * the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
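	/*
	 * Assume the next expiry is a full R_A_TOV (plus 1 sec of margin)
	 * away; the scan below pulls next_time in if an earlier
	 * rrq_stop_time exists.
	 */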
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* If the send failed, clear the rrq here; on
			 * success the completion handler clears the
			 * bit in the xri bitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *        < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
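	/*
	 * The lock is dropped for the allocation; the xri bit set above
	 * already reserves this RRQ against a racing request.
	 */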
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
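	/*
	 * Walk the free list at most once, skipping any XRI that still has
	 * an RRQ outstanding for this node; stop if we come back around to
	 * the first sglq pulled.
	 */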
1275 while (!found) {
1276 if (!sglq)
1277 break;
1278 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1279 test_bit(sglq->sli4_lxritag,
1280 ndlp->active_rrqs_xri_bitmap)) {
1281 /* This xri has an rrq outstanding for this DID.
1282 * put it back in the list and get another xri.
1283 */
1284 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1285 sglq = NULL;
1286 list_remove_head(lpfc_els_sgl_list, sglq,
1287 struct lpfc_sglq, list);
1288 if (sglq == start_sglq) {
1289 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1290 sglq = NULL;
1291 break;
1292 } else
1293 continue;
1294 }
1295 sglq->ndlp = ndlp;
1296 found = 1;
1297 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1298 sglq->state = SGL_ALLOCATED;
1299 }
1300 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1301 return sglq;
1302 }
1303
1304 /**
1305 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1306 * @phba: Pointer to HBA context object.
1307 * @piocbq: Pointer to the iocbq.
1308 *
1309 * This function is called with the sgl_list lock held. This function
1310 * gets a new driver sglq object from the sglq list. If the
1311 * list is not empty then it is successful, it returns pointer to the newly
1312 * allocated sglq object else it returns NULL.
1313 **/
1314 struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba * phba,struct lpfc_iocbq * piocbq)1315 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1316 {
1317 struct list_head *lpfc_nvmet_sgl_list;
1318 struct lpfc_sglq *sglq = NULL;
1319
1320 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1321
1322 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1323
1324 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1325 if (!sglq)
1326 return NULL;
1327 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1328 sglq->state = SGL_ALLOCATED;
1329 return sglq;
1330 }
1331
1332 /**
1333 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1334 * @phba: Pointer to HBA context object.
1335 *
1336 * This function is called with no lock held. This function
1337 * allocates a new driver iocb object from the iocb pool. If the
1338 * allocation is successful, it returns pointer to the newly
1339 * allocated iocb object else it returns NULL.
1340 **/
1341 struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba * phba)1342 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1343 {
1344 struct lpfc_iocbq * iocbq = NULL;
1345 unsigned long iflags;
1346
1347 spin_lock_irqsave(&phba->hbalock, iflags);
1348 iocbq = __lpfc_sli_get_iocbq(phba);
1349 spin_unlock_irqrestore(&phba->hbalock, iflags);
1350 return iocbq;
1351 }
1352
1353 /**
1354 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1355 * @phba: Pointer to HBA context object.
1356 * @iocbq: Pointer to driver iocb object.
1357 *
1358 * This function is called to release the driver iocb object
1359 * to the iocb pool. The iotag in the iocb object
1360 * does not change for each use of the iocb object. This function
1361 * clears all other fields of the iocb object when it is freed.
1362 * The sqlq structure that holds the xritag and phys and virtual
1363 * mappings for the scatter gather list is retrieved from the
1364 * active array of sglq. The get of the sglq pointer also clears
1365 * the entry in the array. If the status of the IO indiactes that
1366 * this IO was aborted then the sglq entry it put on the
1367 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1368 * IO has good status or fails for any other reason then the sglq
1369 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1370 * asserted held in the code path calling this routine.
1371 **/
1372 static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1373 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1374 {
1375 struct lpfc_sglq *sglq;
1376 size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
1377 unsigned long iflag = 0;
1378 struct lpfc_sli_ring *pring;
1379
1380 if (iocbq->sli4_xritag == NO_XRI)
1381 sglq = NULL;
1382 else
1383 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1384
1385
1386 if (sglq) {
1387 if (iocbq->cmd_flag & LPFC_IO_NVMET) {
1388 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1389 iflag);
1390 sglq->state = SGL_FREED;
1391 sglq->ndlp = NULL;
1392 list_add_tail(&sglq->list,
1393 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1394 spin_unlock_irqrestore(
1395 &phba->sli4_hba.sgl_list_lock, iflag);
1396 goto out;
1397 }
1398
1399 if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
1400 (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
1401 sglq->state != SGL_XRI_ABORTED) {
1402 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1403 iflag);
1404
1405 /* Check if we can get a reference on ndlp */
1406 if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1407 sglq->ndlp = NULL;
1408
1409 list_add(&sglq->list,
1410 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1411 spin_unlock_irqrestore(
1412 &phba->sli4_hba.sgl_list_lock, iflag);
1413 } else {
1414 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1415 iflag);
1416 sglq->state = SGL_FREED;
1417 sglq->ndlp = NULL;
1418 list_add_tail(&sglq->list,
1419 &phba->sli4_hba.lpfc_els_sgl_list);
1420 spin_unlock_irqrestore(
1421 &phba->sli4_hba.sgl_list_lock, iflag);
1422 pring = lpfc_phba_elsring(phba);
1423 /* Check if TXQ queue needs to be serviced */
1424 if (pring && (!list_empty(&pring->txq)))
1425 lpfc_worker_wake_up(phba);
1426 }
1427 }
1428
1429 out:
1430 /*
1431 * Clean all volatile data fields, preserve iotag and node struct.
1432 */
1433 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1434 iocbq->sli4_lxritag = NO_XRI;
1435 iocbq->sli4_xritag = NO_XRI;
1436 iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
1437 LPFC_IO_NVME_LS);
1438 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1439 }
1440
1441
1442 /**
1443 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1444 * @phba: Pointer to HBA context object.
1445 * @iocbq: Pointer to driver iocb object.
1446 *
1447 * This function is called to release the driver iocb object to the
1448 * iocb pool. The iotag in the iocb object does not change for each
1449 * use of the iocb object. This function clears all other fields of
1450 * the iocb object when it is freed. The hbalock is asserted held in
1451 * the code path calling this routine.
1452 **/
1453 static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1454 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1455 {
1456 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1457
1458 /*
1459 * Clean all volatile data fields, preserve iotag and node struct.
1460 */
1461 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1462 iocbq->sli4_xritag = NO_XRI;
1463 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1464 }
1465
1466 /**
1467 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1468 * @phba: Pointer to HBA context object.
1469 * @iocbq: Pointer to driver iocb object.
1470 *
1471 * This function is called with hbalock held to release driver
1472 * iocb object to the iocb pool. The iotag in the iocb object
1473 * does not change for each use of the iocb object. This function
1474 * clears all other fields of the iocb object when it is freed.
1475 **/
1476 static void
__lpfc_sli_release_iocbq(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1477 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1478 {
1479 lockdep_assert_held(&phba->hbalock);
1480
1481 phba->__lpfc_sli_release_iocbq(phba, iocbq);
1482 phba->iocb_cnt--;
1483 }
1484
1485 /**
1486 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1487 * @phba: Pointer to HBA context object.
1488 * @iocbq: Pointer to driver iocb object.
1489 *
1490 * This function is called with no lock held to release the iocb to
1491 * iocb pool.
1492 **/
1493 void
lpfc_sli_release_iocbq(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1494 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1495 {
1496 unsigned long iflags;
1497
1498 /*
1499 * Clean all volatile data fields, preserve iotag and node struct.
1500 */
1501 spin_lock_irqsave(&phba->hbalock, iflags);
1502 __lpfc_sli_release_iocbq(phba, iocbq);
1503 spin_unlock_irqrestore(&phba->hbalock, iflags);
1504 }
1505
1506 /**
1507 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1508 * @phba: Pointer to HBA context object.
1509 * @iocblist: List of IOCBs.
1510 * @ulpstatus: ULP status in IOCB command field.
1511 * @ulpWord4: ULP word-4 in IOCB command field.
1512 *
1513 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1514 * on the list by invoking the complete callback function associated with the
1515 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1516 * fields.
1517 **/
1518 void
lpfc_sli_cancel_iocbs(struct lpfc_hba * phba,struct list_head * iocblist,uint32_t ulpstatus,uint32_t ulpWord4)1519 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1520 uint32_t ulpstatus, uint32_t ulpWord4)
1521 {
1522 struct lpfc_iocbq *piocb;
1523
1524 while (!list_empty(iocblist)) {
1525 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1526 if (piocb->cmd_cmpl) {
1527 if (piocb->cmd_flag & LPFC_IO_NVME) {
1528 lpfc_nvme_cancel_iocb(phba, piocb,
1529 ulpstatus, ulpWord4);
1530 } else {
1531 if (phba->sli_rev == LPFC_SLI_REV4) {
1532 bf_set(lpfc_wcqe_c_status,
1533 &piocb->wcqe_cmpl, ulpstatus);
1534 piocb->wcqe_cmpl.parameter = ulpWord4;
1535 } else {
1536 piocb->iocb.ulpStatus = ulpstatus;
1537 piocb->iocb.un.ulpWord[4] = ulpWord4;
1538 }
1539 (piocb->cmd_cmpl) (phba, piocb, piocb);
1540 }
1541 } else {
1542 lpfc_sli_release_iocbq(phba, piocb);
1543 }
1544 }
1545 return;
1546 }
1547
1548 /**
1549 * lpfc_sli_iocb_cmd_type - Get the iocb type
1550 * @iocb_cmnd: iocb command code.
1551 *
1552 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	u32 ulp_command = 0;

	BUG_ON(!piocb);
	ulp_command = get_job_cmnd(phba, piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;
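	/*
	 * Only genuine ELS commands (not abort/close requests) on the
	 * ELS ring restart the els_tmofunc timeout below.
	 */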
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (ulp_command != CMD_ABORT_XRI_WQE) &&
	    (ulp_command != CMD_ABORT_XRI_CN) &&
	    (ulp_command != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with the hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb on the list after
 * removing it from the list; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This routine will inform the driver of any BW adjustments we need
 * to make. These changes will be picked up during the next CMF
 * timer interrupt. In addition, any BW changes will be logged
 * with LOG_CGN_MGMT.
 **/
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	union lpfc_wqe128 *wqe;
	uint32_t status, info;
	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
	uint64_t bw, bwdif, slop;
	uint64_t pcent, bwpcent;
	int asig, afpin, sigcnt, fpincnt;
	int wsigmax, wfpinmax, cg, tdp;
	char *s;

	/* First check for error */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	if (status) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6211 CMF_SYNC_WQE Error "
				"req_tag x%x status x%x hwstatus x%x "
				"tdatap x%x parm x%x\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe),
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed,
				wcqe->parameter);
		goto out;
	}

	/* Gather congestion information on a successful cmpl */
	info = wcqe->parameter;
	phba->cmf_active_info = info;

	/* See if firmware info count is valid or has changed */
	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
		info = 0;
	else
		phba->cmf_info_per_interval = info;

	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);

	/* Get BW requirement from firmware */
	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
	if (!bw) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		goto out;
	}

	/* Gather information needed for logging if a BW change is required */
	wqe = &cmdiocb->wqe;
	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
	if (phba->cmf_max_bytes_per_interval != bw ||
	    (asig || afpin || sigcnt || fpincnt)) {
		/* Are we increasing or decreasing BW */
		if (phba->cmf_max_bytes_per_interval < bw) {
			bwdif = bw - phba->cmf_max_bytes_per_interval;
			s = "Increase";
		} else {
			bwdif = phba->cmf_max_bytes_per_interval - bw;
			s = "Decrease";
		}

		/* What is the change percentage */
		slop = div_u64(phba->cmf_link_byte_count, 200); /* For rounding */
		pcent = div64_u64(bwdif * 100 + slop,
				  phba->cmf_link_byte_count);
		bwpcent = div64_u64(bw * 100 + slop,
				    phba->cmf_link_byte_count);
		if (asig) {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6237 BW Threshold %lld%% (%lld): "
					"%lld%% %s: Signal Alarm: cg:%d "
					"Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		} else if (afpin) {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6238 BW Threshold %lld%% (%lld): "
					"%lld%% %s: FPIN Alarm: cg:%d "
					"Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		} else if (sigcnt) {
			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6239 BW Threshold %lld%% (%lld): "
					"%lld%% %s: Signal Warning: "
					"Cnt %d Max %d: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, sigcnt,
					wsigmax, cg, phba->cmf_active_info);
		} else if (fpincnt) {
			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6240 BW Threshold %lld%% (%lld): "
					"%lld%% %s: FPIN Warning: "
					"Cnt %d Max %d: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, fpincnt,
					wfpinmax, cg, phba->cmf_active_info);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6241 BW Threshold %lld%% (%lld): "
					"CMF %lld%% %s: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		}
	} else if (info) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6246 Info Threshold %u\n", info);
	}

	/* Save BW change to be picked up during next timer interrupt */
	phba->cmf_last_sync_bw = bw;
out:
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
 * @phba: Pointer to HBA context object.
 * @ms: ms to set in WQE interval, 0 means use init op
 * @total: Total rcv bytes for this interval
 *
 * This routine is called every CMF timer interrupt. Its purpose is
 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
 * that may indicate we have congestion (FPINs or Signals). Upon
 * completion, the firmware will indicate any BW restrictions the
 * driver may need to take.
 **/
int
lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
{
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *sync_buf;
	unsigned long iflags;
	u32 ret_val;
	u32 atot, wtot, max;
	u16 warn_sync_period = 0;

	/* First address any alarm / warning activity */
	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);

	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
	    phba->link_state == LPFC_LINK_DOWN)
		return 0;

	spin_lock_irqsave(&phba->hbalock, iflags);
	sync_buf = __lpfc_sli_get_iocbq(phba);
	if (!sync_buf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"6244 No available WQEs for CMF_SYNC_WQE\n");
		ret_val = ENOMEM;
		goto out_unlock;
	}

	wqe = &sync_buf->wqe;

	/* WQEs are reused. Clear stale data and set key fields to zero */
	memset(wqe, 0, sizeof(*wqe));

	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
	if (!ms) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6441 CMF Init %d - CMF_SYNC_WQE\n",
				phba->fc_eventTag);
		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
		goto initpath;
	}

	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);

	/* Check for alarms / warnings */
	if (atot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit a Signal alarm condition */
			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
		} else {
			/* We hit an FPIN alarm condition */
			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
		}
	} else if (wtot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit a Signal warning condition */
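			/*
			 * Derive the maximum expected warning signals
			 * for this interval from the fabric signal and
			 * ACQE congestion frequencies.
			 */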
			max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
				lpfc_acqe_cgn_frequency;
			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
			warn_sync_period = lpfc_acqe_cgn_frequency;
		} else {
			/* We hit an FPIN warning condition */
			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
			if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
				warn_sync_period =
				LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
		}
	}

	/* Update total read blocks during previous timer interval */
	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);

initpath:
	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
	wqe->cmf_sync.event_tag = phba->fc_eventTag;
	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);

	/* Setup reqtag to match the wqe completion. */
	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);

	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
	bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);

	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);

	sync_buf->vport = phba->pport;
	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
	sync_buf->cmd_dmabuf = NULL;
	sync_buf->rsp_dmabuf = NULL;
	sync_buf->bpl_dmabuf = NULL;
	sync_buf->sli4_xritag = NO_XRI;

	sync_buf->cmd_flag |= LPFC_IO_CMF;
	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
	if (ret_val) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
				ret_val);
		__lpfc_sli_release_iocbq(phba, sync_buf);
	}
out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret_val;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is a free slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

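	/*
	 * If next_cmdidx has not advanced past cmdidx yet, move it to
	 * the next entry, wrapping at the end of the circular command
	 * ring.
	 */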
	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
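		/*
		 * Drop the hbalock so the larger lookup array can be
		 * allocated with GFP_KERNEL; the length is re-checked
		 * under the lock below in case another thread already
		 * grew the array.
		 */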
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 * posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
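	/* Order the IOCB copy ahead of the cmdPutInx update below */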
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
	 */
	if (nextiocb->cmd_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform the
 * firmware that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with the hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ, it will return a pointer to the next available
 * HBQ entry; otherwise it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

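	/*
	 * Advance next_hbqPutIdx once it has caught up with hbqPutIdx,
	 * wrapping at entry_count to stay inside the circular HBQ.
	 */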
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
					 &phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function returns zero if
 * it successfully posts the buffer; otherwise it returns an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer; otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
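	/*
	 * Encode the HBQ number in the upper 16 bits of the buffer tag;
	 * the low bits carry the RQ put index returned above so
	 * lpfc_sli_hbqbuf_find() can locate the buffer later.
	 */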
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
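		/*
		 * Tag the buffer with its index within this HBQ plus the
		 * HBQ number in the upper 16 bits.
		 */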
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				   (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->init_count);
}

/*
 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the receive queue to take the buffer from.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

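	/* The HBQ number is carried in the upper 16 bits of the tag */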
	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to the firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}

static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}

void
lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	__lpfc_sli_rpi_release(vport, ndlp);
}

/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to reclaim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		if (mp) {
			pmb->ctx_buf = NULL;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
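		/* SLI4 ports report the VPI offset by vpi_base; normalize it */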
		if (phba->sli_rev == LPFC_SLI_REV4)
			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x x%px x%x x%x\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did,
				ndlp, vport->load_flag, kref_read(&ndlp->kref));

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}

			/* The unreg_login mailbox is complete and had a
			 * reference that has to be released. The PLOGI
			 * got its own ref.
			 */
			lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* This nlp_put pairs with lpfc_sli4_resume_rpi */
	if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts that
 * reference back.
 *
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"x%px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				     NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
				lpfc_nlp_put(ndlp);
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes mailbox completion interrupt and adds
 * completed mailbox commands to the mboxq_cmpl queue and signals the worker
 * thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which returns the
 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
 * function returns the mailbox commands to the upper layer by calling the
 * completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport ? pmb->vport->port_state :
					LPFC_VPORT_UNKNOWN);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer was posted for a particular exchange and
 * the function will return the buffer without replacing it.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}

/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
 * containing an NVME LS request.
 * @phba: pointer to lpfc hba data structure.
 * @piocb: pointer to the iocbq struct representing the sequence starting
 *         frame.
 *
 * This routine initially validates the NVME LS, validates there is a login
 * with the port that sent the LS, and then calls the appropriate nvme host
 * or target LS request handler.
 **/
static void
lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_async_xchg_ctx *axchg = NULL;
	char *failwhy = NULL;
	uint32_t oxid, sid, did, fctl, size;
	int ret = 1;

	d_buf = piocb->cmd_dmabuf;

	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
	fc_hdr = nvmebuf->hbuf.virt;
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	did = sli4_did_from_fc_hdr(fc_hdr);
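	/* F_CTL is a 24-bit field; assemble it from the three header bytes */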
	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
		fc_hdr->fh_f_ctl[1] << 8 |
		fc_hdr->fh_f_ctl[2]);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);

	lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	if (phba->pport->load_flag & FC_UNLOADING) {
		failwhy = "Driver Unloading";
	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
		failwhy = "NVME FC4 Disabled";
	} else if (!phba->nvmet_support && !phba->pport->localport) {
		failwhy = "No Localport";
	} else if (phba->nvmet_support && !phba->targetport) {
		failwhy = "No Targetport";
	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
		failwhy = "Bad NVME LS R_CTL";
	} else if (unlikely((fctl & 0x00FF0000) !=
			    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
		failwhy = "Bad NVME LS F_CTL";
	} else {
		axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
		if (!axchg)
			failwhy = "No CTX memory";
	}

	if (unlikely(failwhy)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
				sid, oxid, failwhy);
		goto out_fail;
	}

	/* validate the source of the LS is logged in */
	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6216 NVME Unsol rcv: No ndlp: "
				"NPort_ID x%x oxid x%x\n",
				sid, oxid);
		goto out_fail;
	}

3258 axchg->phba = phba;
3259 axchg->ndlp = ndlp;
3260 axchg->size = size;
3261 axchg->oxid = oxid;
3262 axchg->sid = sid;
3263 axchg->wqeq = NULL;
3264 axchg->state = LPFC_NVME_STE_LS_RCV;
3265 axchg->entry_cnt = 1;
3266 axchg->rqb_buffer = (void *)nvmebuf;
3267 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3268 axchg->payload = nvmebuf->dbuf.virt;
3269 INIT_LIST_HEAD(&axchg->list);
3270
3271 if (phba->nvmet_support) {
3272 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3273 spin_lock_irq(&ndlp->lock);
3274 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3275 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3276 spin_unlock_irq(&ndlp->lock);
3277
3278 /* This reference is a single occurrence to hold the
3279 * node valid until the nvmet transport calls
3280 * host_release.
3281 */
3282 if (!lpfc_nlp_get(ndlp))
3283 goto out_fail;
3284
3285 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3286 "6206 NVMET unsol ls_req ndlp x%px "
3287 "DID x%x xflags x%x refcnt %d\n",
3288 ndlp, ndlp->nlp_DID,
3289 ndlp->fc4_xpt_flags,
3290 kref_read(&ndlp->kref));
3291 } else {
3292 spin_unlock_irq(&ndlp->lock);
3293 }
3294 } else {
3295 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3296 }
3297
3298 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3299 if (!ret)
3300 return;
3301
3302 out_fail:
3303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3304 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3305 "NVMe%s handler failed %d\n",
3306 did, sid, oxid,
3307 (phba->nvmet_support) ? "T" : "I", ret);
3308
3309 /* recycle receive buffer */
3310 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3311
3312 /* If start of new exchange, abort it */
3313 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3314 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3315
3316 if (ret)
3317 kfree(axchg);
3318 }
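/*
 * For illustration: the F_CTL word tested above is a 24-bit field
 * assembled from three header bytes, and a valid NVME LS must arrive
 * as a complete single-frame sequence with sequence initiative
 * transferred. A minimal sketch of that check in isolation (the mask
 * 0x00FF0000 keeps only bits 16-23 of F_CTL):
 *
 *	u32 fctl = (fc_hdr->fh_f_ctl[0] << 16) |
 *		   (fc_hdr->fh_f_ctl[1] << 8) |
 *		    fc_hdr->fh_f_ctl[2];
 *
 *	if ((fctl & 0x00FF0000) !=
 *	    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))
 *		return -EINVAL;		// hypothetical reject path
 */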
3319
3320 /**
3321 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3322 * @phba: Pointer to HBA context object.
3323 * @pring: Pointer to driver SLI ring object.
3324 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3325 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3326 * @fch_type: the type for the first frame of the sequence.
3327 *
3328 * This function is called with no lock held. This function uses the r_ctl and
3329 * type of the received sequence to find the correct callback function to call
3330 * to process the sequence.
3331 **/
3332 static int
3333 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3334 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3335 uint32_t fch_type)
3336 {
3337 int i;
3338
3339 switch (fch_type) {
3340 case FC_TYPE_NVME:
3341 lpfc_nvme_unsol_ls_handler(phba, saveq);
3342 return 1;
3343 default:
3344 break;
3345 }
3346
3347 /* unSolicited Responses */
3348 if (pring->prt[0].profile) {
3349 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3350 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3351 saveq);
3352 return 1;
3353 }
3354 /* We must search, based on rctl / type
3355 for the right routine */
3356 for (i = 0; i < pring->num_mask; i++) {
3357 if ((pring->prt[i].rctl == fch_r_ctl) &&
3358 (pring->prt[i].type == fch_type)) {
3359 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3360 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3361 (phba, pring, saveq);
3362 return 1;
3363 }
3364 }
3365 return 0;
3366 }
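/*
 * For illustration: each prt[] entry pairs an (rctl, type) with an
 * unsolicited-event handler, and the loop above is a linear dispatch
 * over those masks. A hypothetical registration at ring setup time --
 * lpfc_els_unsol_event() is assumed here purely as an example handler:
 *
 *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *	pring->prt[0].type = FC_TYPE_ELS;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 *	pring->num_mask = 1;
 */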
3367
3368 static void
3369 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3370 struct lpfc_iocbq *saveq)
3371 {
3372 IOCB_t *irsp;
3373 union lpfc_wqe128 *wqe;
3374 u16 i = 0;
3375
3376 irsp = &saveq->iocb;
3377 wqe = &saveq->wqe;
3378
3379 /* Fill wcqe with the IOCB status fields */
3380 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3381 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3382 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3383 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3384
3385 /* Source ID */
3386 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3387
3388 /* rx-id of the response frame */
3389 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3390
3391 /* ox-id of the frame */
3392 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3393 irsp->unsli3.rcvsli3.ox_id);
3394
3395 /* DID */
3396 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3397 irsp->un.rcvels.remoteID);
3398
3399 /* unsol data len */
3400 for (i = 0; i < irsp->ulpBdeCount; i++) {
3401 struct lpfc_hbq_entry *hbqe = NULL;
3402
3403 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3404 if (i == 0) {
3405 hbqe = (struct lpfc_hbq_entry *)
3406 &irsp->un.ulpWord[0];
3407 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3408 hbqe->bde.tus.f.bdeSize;
3409 } else if (i == 1) {
3410 hbqe = (struct lpfc_hbq_entry *)
3411 &irsp->unsli3.sli3Words[4];
3412 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3413 }
3414 }
3415 }
3416 }
3417
3418 /**
3419 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3420 * @phba: Pointer to HBA context object.
3421 * @pring: Pointer to driver SLI ring object.
3422 * @saveq: Pointer to the unsolicited iocb.
3423 *
3424 * This function is called with no lock held by the ring event handler
3425 * when there is an unsolicited iocb posted to the response ring by the
3426 * firmware. This function gets the buffer associated with the iocbs
3427 * and calls the event handler for the ring. This function handles both
3428 * qring buffers and hbq buffers.
3429 * When the function returns 1, the caller can free the iocb object;
3430 * otherwise upper layer functions will free the iocb objects.
3431 **/
3432 static int
3433 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3434 struct lpfc_iocbq *saveq)
3435 {
3436 IOCB_t *irsp;
3437 WORD5 *w5p;
3438 dma_addr_t paddr;
3439 uint32_t Rctl, Type;
3440 struct lpfc_iocbq *iocbq;
3441 struct lpfc_dmabuf *dmzbuf;
3442
3443 irsp = &saveq->iocb;
3444 saveq->vport = phba->pport;
3445
3446 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3447 if (pring->lpfc_sli_rcv_async_status)
3448 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3449 else
3450 lpfc_printf_log(phba,
3451 KERN_WARNING,
3452 LOG_SLI,
3453 "0316 Ring %d handler: unexpected "
3454 "ASYNC_STATUS iocb received evt_code "
3455 "0x%x\n",
3456 pring->ringno,
3457 irsp->un.asyncstat.evt_code);
3458 return 1;
3459 }
3460
3461 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3462 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3463 if (irsp->ulpBdeCount > 0) {
3464 dmzbuf = lpfc_sli_get_buff(phba, pring,
3465 irsp->un.ulpWord[3]);
3466 lpfc_in_buf_free(phba, dmzbuf);
3467 }
3468
3469 if (irsp->ulpBdeCount > 1) {
3470 dmzbuf = lpfc_sli_get_buff(phba, pring,
3471 irsp->unsli3.sli3Words[3]);
3472 lpfc_in_buf_free(phba, dmzbuf);
3473 }
3474
3475 if (irsp->ulpBdeCount > 2) {
3476 dmzbuf = lpfc_sli_get_buff(phba, pring,
3477 irsp->unsli3.sli3Words[7]);
3478 lpfc_in_buf_free(phba, dmzbuf);
3479 }
3480
3481 return 1;
3482 }
3483
3484 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3485 if (irsp->ulpBdeCount != 0) {
3486 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3487 irsp->un.ulpWord[3]);
3488 if (!saveq->cmd_dmabuf)
3489 lpfc_printf_log(phba,
3490 KERN_ERR,
3491 LOG_SLI,
3492 "0341 Ring %d Cannot find buffer for "
3493 "an unsolicited iocb. tag 0x%x\n",
3494 pring->ringno,
3495 irsp->un.ulpWord[3]);
3496 }
3497 if (irsp->ulpBdeCount == 2) {
3498 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3499 irsp->unsli3.sli3Words[7]);
3500 if (!saveq->bpl_dmabuf)
3501 lpfc_printf_log(phba,
3502 KERN_ERR,
3503 LOG_SLI,
3504 "0342 Ring %d Cannot find buffer for an"
3505 " unsolicited iocb. tag 0x%x\n",
3506 pring->ringno,
3507 irsp->unsli3.sli3Words[7]);
3508 }
3509 list_for_each_entry(iocbq, &saveq->list, list) {
3510 irsp = &iocbq->iocb;
3511 if (irsp->ulpBdeCount != 0) {
3512 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3513 pring,
3514 irsp->un.ulpWord[3]);
3515 if (!iocbq->cmd_dmabuf)
3516 lpfc_printf_log(phba,
3517 KERN_ERR,
3518 LOG_SLI,
3519 "0343 Ring %d Cannot find "
3520 "buffer for an unsolicited iocb"
3521 ". tag 0x%x\n", pring->ringno,
3522 irsp->un.ulpWord[3]);
3523 }
3524 if (irsp->ulpBdeCount == 2) {
3525 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3526 pring,
3527 irsp->unsli3.sli3Words[7]);
3528 if (!iocbq->bpl_dmabuf)
3529 lpfc_printf_log(phba,
3530 KERN_ERR,
3531 LOG_SLI,
3532 "0344 Ring %d Cannot find "
3533 "buffer for an unsolicited "
3534 "iocb. tag 0x%x\n",
3535 pring->ringno,
3536 irsp->unsli3.sli3Words[7]);
3537 }
3538 }
3539 } else {
3540 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3541 irsp->un.cont64[0].addrLow);
3542 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3543 paddr);
3544 if (irsp->ulpBdeCount == 2) {
3545 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3546 irsp->un.cont64[1].addrLow);
3547 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3548 pring,
3549 paddr);
3550 }
3551 }
3552
3553 if (irsp->ulpBdeCount != 0 &&
3554 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3555 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3556 int found = 0;
3557
3558 /* search continue save q for same XRI */
3559 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3560 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3561 saveq->iocb.unsli3.rcvsli3.ox_id) {
3562 list_add_tail(&saveq->list, &iocbq->list);
3563 found = 1;
3564 break;
3565 }
3566 }
3567 if (!found)
3568 list_add_tail(&saveq->clist,
3569 &pring->iocb_continue_saveq);
3570
3571 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3572 list_del_init(&iocbq->clist);
3573 saveq = iocbq;
3574 irsp = &saveq->iocb;
3575 } else {
3576 return 0;
3577 }
3578 }
3579 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3580 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3581 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3582 Rctl = FC_RCTL_ELS_REQ;
3583 Type = FC_TYPE_ELS;
3584 } else {
3585 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3586 Rctl = w5p->hcsw.Rctl;
3587 Type = w5p->hcsw.Type;
3588
3589 /* Firmware Workaround */
3590 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3591 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3592 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3593 Rctl = FC_RCTL_ELS_REQ;
3594 Type = FC_TYPE_ELS;
3595 w5p->hcsw.Rctl = Rctl;
3596 w5p->hcsw.Type = Type;
3597 }
3598 }
3599
3600 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3601 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3602 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3603 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3604 saveq->vport = phba->pport;
3605 else
3606 saveq->vport = lpfc_find_vport_by_vpid(phba,
3607 irsp->unsli3.rcvsli3.vpi);
3608 }
3609
3610 /* Prepare WQE with Unsol frame */
3611 lpfc_sli_prep_unsol_wqe(phba, saveq);
3612
3613 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3614 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3615 "0313 Ring %d handler: unexpected Rctl x%x "
3616 "Type x%x received\n",
3617 pring->ringno, Rctl, Type);
3618
3619 return 1;
3620 }
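/*
 * For illustration: multi-frame sequences are reassembled by OX_ID.
 * The first response of an exchange starts a chain on
 * iocb_continue_saveq; later responses with a matching ox_id are
 * appended, and the completed chain is handed to
 * lpfc_complete_unsol_iocb() once a non-INTERMED status arrives.
 * Conceptually, with hypothetical helpers standing in for the list
 * walk above:
 *
 *	chain = find_chain_by_oxid(pring, ox_id);
 *	if (chain)
 *		append(chain, saveq);
 *	else
 *		start_chain(pring, saveq);
 */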
3621
3622 /**
3623 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3624 * @phba: Pointer to HBA context object.
3625 * @pring: Pointer to driver SLI ring object.
3626 * @prspiocb: Pointer to response iocb object.
3627 *
3628 * This function looks up the iocb_lookup table to get the command iocb
3629 * corresponding to the given response iocb using the iotag of the
3630 * response iocb. The driver calls this function with the hbalock held
3631 * for SLI3 ports or the ring lock held for SLI4 ports.
3632 * This function returns the command iocb object if it finds the command
3633 * iocb else returns NULL.
3634 **/
3635 static struct lpfc_iocbq *
3636 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3637 struct lpfc_sli_ring *pring,
3638 struct lpfc_iocbq *prspiocb)
3639 {
3640 struct lpfc_iocbq *cmd_iocb = NULL;
3641 u16 iotag;
3642
3643 if (phba->sli_rev == LPFC_SLI_REV4)
3644 iotag = get_wqe_reqtag(prspiocb);
3645 else
3646 iotag = prspiocb->iocb.ulpIoTag;
3647
3648 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3649 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3650 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3651 /* remove from txcmpl queue list */
3652 list_del_init(&cmd_iocb->list);
3653 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3654 pring->txcmplq_cnt--;
3655 return cmd_iocb;
3656 }
3657 }
3658
3659 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3660 "0317 iotag x%x is out of "
3661 "range: max iotag x%x\n",
3662 iotag, phba->sli.last_iotag);
3663 return NULL;
3664 }
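/*
 * For illustration: command/response matching is a direct array index,
 * not a search. When a command iocb is issued, lpfc_sli_next_iotag()
 * assigns an iotag and records the iocb in phba->sli.iocbq_lookup[],
 * so the completion path above recovers it in O(1):
 *
 *	iotag = lpfc_sli_next_iotag(phba, piocb);	// issue side
 *	...
 *	cmd_iocb = phba->sli.iocbq_lookup[iotag];	// completion side
 */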
3665
3666 /**
3667 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3668 * @phba: Pointer to HBA context object.
3669 * @pring: Pointer to driver SLI ring object.
3670 * @iotag: IOCB tag.
3671 *
3672 * This function looks up the iocb_lookup table to get the command iocb
3673 * corresponding to the given iotag. The driver calls this function with
3674 * the ring lock held because this function is an SLI4 port only helper.
3675 * This function returns the command iocb object if it finds the command
3676 * iocb else returns NULL.
3677 **/
3678 static struct lpfc_iocbq *
3679 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3680 struct lpfc_sli_ring *pring, uint16_t iotag)
3681 {
3682 struct lpfc_iocbq *cmd_iocb = NULL;
3683
3684 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3685 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3686 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3687 /* remove from txcmpl queue list */
3688 list_del_init(&cmd_iocb->list);
3689 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3690 pring->txcmplq_cnt--;
3691 return cmd_iocb;
3692 }
3693 }
3694
3695 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3696 "0372 iotag x%x lookup error: max iotag (x%x) "
3697 "cmd_flag x%x\n",
3698 iotag, phba->sli.last_iotag,
3699 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3700 return NULL;
3701 }
3702
3703 /**
3704 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3705 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object.
3707 * @saveq: Pointer to the response iocb to be processed.
3708 *
3709 * This function is called by the ring event handler for non-fcp
3710 * rings when there is a new response iocb in the response ring.
3711 * The caller is not required to hold any locks. This function
3712 * gets the command iocb associated with the response iocb and
3713 * calls the completion handler for the command iocb. If there
3714 * is no completion handler, the function will free the resources
3715 * associated with the command iocb. If the response iocb is for
3716 * an already aborted command iocb, the status of the completion
3717 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3718 * This function always returns 1.
3719 **/
3720 static int
3721 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3722 struct lpfc_iocbq *saveq)
3723 {
3724 struct lpfc_iocbq *cmdiocbp;
3725 unsigned long iflag;
3726 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3727
3728 if (phba->sli_rev == LPFC_SLI_REV4)
3729 spin_lock_irqsave(&pring->ring_lock, iflag);
3730 else
3731 spin_lock_irqsave(&phba->hbalock, iflag);
3732 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3733 if (phba->sli_rev == LPFC_SLI_REV4)
3734 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3735 else
3736 spin_unlock_irqrestore(&phba->hbalock, iflag);
3737
3738 ulp_command = get_job_cmnd(phba, saveq);
3739 ulp_status = get_job_ulpstatus(phba, saveq);
3740 ulp_word4 = get_job_word4(phba, saveq);
3741 ulp_context = get_job_ulpcontext(phba, saveq);
3742 if (phba->sli_rev == LPFC_SLI_REV4)
3743 iotag = get_wqe_reqtag(saveq);
3744 else
3745 iotag = saveq->iocb.ulpIoTag;
3746
3747 if (cmdiocbp) {
3748 ulp_command = get_job_cmnd(phba, cmdiocbp);
3749 if (cmdiocbp->cmd_cmpl) {
3750 /*
3751 * If an ELS command failed send an event to mgmt
3752 * application.
3753 */
3754 if (ulp_status &&
3755 (pring->ringno == LPFC_ELS_RING) &&
3756 (ulp_command == CMD_ELS_REQUEST64_CR))
3757 lpfc_send_els_failure_event(phba,
3758 cmdiocbp, saveq);
3759
3760 /*
3761 * Post all ELS completions to the worker thread.
3762 * All other are passed to the completion callback.
3763 */
3764 if (pring->ringno == LPFC_ELS_RING) {
3765 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3766 (cmdiocbp->cmd_flag &
3767 LPFC_DRIVER_ABORTED)) {
3768 spin_lock_irqsave(&phba->hbalock,
3769 iflag);
3770 cmdiocbp->cmd_flag &=
3771 ~LPFC_DRIVER_ABORTED;
3772 spin_unlock_irqrestore(&phba->hbalock,
3773 iflag);
3774 saveq->iocb.ulpStatus =
3775 IOSTAT_LOCAL_REJECT;
3776 saveq->iocb.un.ulpWord[4] =
3777 IOERR_SLI_ABORTED;
3778
3779 /* Firmware could still be in progress
3780 * of DMAing payload, so don't free data
3781 * buffer till after a hbeat.
3782 */
3783 spin_lock_irqsave(&phba->hbalock,
3784 iflag);
3785 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3786 spin_unlock_irqrestore(&phba->hbalock,
3787 iflag);
3788 }
3789 if (phba->sli_rev == LPFC_SLI_REV4) {
3790 if (saveq->cmd_flag &
3791 LPFC_EXCHANGE_BUSY) {
3792 /* Set cmdiocb flag for the
3793 * exchange busy so sgl (xri)
3794 * will not be released until
3795 * the abort xri is received
3796 * from hba.
3797 */
3798 spin_lock_irqsave(
3799 &phba->hbalock, iflag);
3800 cmdiocbp->cmd_flag |=
3801 LPFC_EXCHANGE_BUSY;
3802 spin_unlock_irqrestore(
3803 &phba->hbalock, iflag);
3804 }
3805 if (cmdiocbp->cmd_flag &
3806 LPFC_DRIVER_ABORTED) {
3807 /*
3808 * Clear LPFC_DRIVER_ABORTED
3809 * bit in case it was driver
3810 * initiated abort.
3811 */
3812 spin_lock_irqsave(
3813 &phba->hbalock, iflag);
3814 cmdiocbp->cmd_flag &=
3815 ~LPFC_DRIVER_ABORTED;
3816 spin_unlock_irqrestore(
3817 &phba->hbalock, iflag);
3818 set_job_ulpstatus(cmdiocbp,
3819 IOSTAT_LOCAL_REJECT);
3820 set_job_ulpword4(cmdiocbp,
3821 IOERR_ABORT_REQUESTED);
3822 /*
3823 * For SLI4, irspiocb contains
3824 * NO_XRI in sli_xritag, it
3825 * shall not affect releasing
3826 * sgl (xri) process.
3827 */
3828 set_job_ulpstatus(saveq,
3829 IOSTAT_LOCAL_REJECT);
3830 set_job_ulpword4(saveq,
3831 IOERR_SLI_ABORTED);
3832 spin_lock_irqsave(
3833 &phba->hbalock, iflag);
3834 saveq->cmd_flag |=
3835 LPFC_DELAY_MEM_FREE;
3836 spin_unlock_irqrestore(
3837 &phba->hbalock, iflag);
3838 }
3839 }
3840 }
3841 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3842 } else
3843 lpfc_sli_release_iocbq(phba, cmdiocbp);
3844 } else {
3845 /*
3846 * Unknown initiating command based on the response iotag.
3847 * This could be the case on the ELS ring because of
3848 * lpfc_els_abort().
3849 */
3850 if (pring->ringno != LPFC_ELS_RING) {
3851 /*
3852 * Ring <ringno> handler: unexpected completion IoTag
3853 * <IoTag>
3854 */
3855 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3856 "0322 Ring %d handler: "
3857 "unexpected completion IoTag x%x "
3858 "Data: x%x x%x x%x x%x\n",
3859 pring->ringno, iotag, ulp_status,
3860 ulp_word4, ulp_command, ulp_context);
3861 }
3862 }
3863
3864 return 1;
3865 }
3866
3867 /**
3868 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3869 * @phba: Pointer to HBA context object.
3870 * @pring: Pointer to driver SLI ring object.
3871 *
3872 * This function is called from the iocb ring event handlers when the
3873 * put pointer is ahead of the get pointer for a ring. This function
3874 * signals an error attention condition to the worker thread and the
3875 * worker thread will transition the HBA to offline state.
3876 **/
3877 static void
3878 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3879 {
3880 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3881 /*
3882 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3883 * rsp ring <portRspMax>
3884 */
3885 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3886 "0312 Ring %d handler: portRspPut %d "
3887 "is bigger than rsp ring %d\n",
3888 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3889 pring->sli.sli3.numRiocb);
3890
3891 phba->link_state = LPFC_HBA_ERROR;
3892
3893 /*
3894 * All error attention handlers are posted to
3895 * worker thread
3896 */
3897 phba->work_ha |= HA_ERATT;
3898 phba->work_hs = HS_FFER3;
3899
3900 lpfc_worker_wake_up(phba);
3901
3902 return;
3903 }
3904
3905 /**
3906 * lpfc_poll_eratt - Error attention polling timer timeout handler
3907 * @t: Pointer to the timer_list embedded in the HBA context object.
3908 *
3909 * This function is invoked by the Error Attention polling timer when the
3910 * timer times out. It will check the SLI Error Attention register for
3911 * possible attention events. If so, it will post an Error Attention event
3912 * and wake up worker thread to process it. Otherwise, it will set up the
3913 * Error Attention polling timer for the next poll.
3914 **/
3915 void lpfc_poll_eratt(struct timer_list *t)
3916 {
3917 struct lpfc_hba *phba;
3918 uint32_t eratt = 0;
3919 uint64_t sli_intr, cnt;
3920
3921 phba = from_timer(phba, t, eratt_poll);
3922
3923 /* Here we will also keep track of interrupts per sec of the hba */
3924 sli_intr = phba->sli.slistat.sli_intr;
3925
3926 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3927 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3928 sli_intr);
3929 else
3930 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3931
3932 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3933 do_div(cnt, phba->eratt_poll_interval);
3934 phba->sli.slistat.sli_ips = cnt;
3935
3936 phba->sli.slistat.sli_prev_intr = sli_intr;
3937
3938 /* Check chip HA register for error event */
3939 eratt = lpfc_sli_check_eratt(phba);
3940
3941 if (eratt)
3942 /* Tell the worker thread there is work to do */
3943 lpfc_worker_wake_up(phba);
3944 else
3945 /* Restart the timer for next eratt poll */
3946 mod_timer(&phba->eratt_poll,
3947 jiffies +
3948 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3949 return;
3950 }
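/*
 * For illustration: the rate computation above first handles counter
 * wrap, then uses do_div() because 32-bit kernels have no native
 * 64-by-32 divide. do_div() divides a u64 in place and returns the
 * remainder; minimal usage sketch:
 *
 *	u64 total = 27500;		// e.g. interrupts counted
 *	u32 rem;
 *
 *	rem = do_div(total, 30);	// total = quotient, rem = remainder
 */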
3951
3952
3953 /**
3954 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3955 * @phba: Pointer to HBA context object.
3956 * @pring: Pointer to driver SLI ring object.
3957 * @mask: Host attention register mask for this ring.
3958 *
3959 * This function is called from the interrupt context when there is a ring
3960 * event for the fcp ring. The caller does not hold any lock.
3961 * The function processes each response iocb in the response ring until it
3962 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3963 * LE bit set. The function will call the completion handler of the command iocb
3964 * if the response iocb indicates a completion for a command iocb or it is
3965 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3966 * function if this is an unsolicited iocb.
3967 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3968 * to check it explicitly.
3969 */
3970 int
3971 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3972 struct lpfc_sli_ring *pring, uint32_t mask)
3973 {
3974 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3975 IOCB_t *irsp = NULL;
3976 IOCB_t *entry = NULL;
3977 struct lpfc_iocbq *cmdiocbq = NULL;
3978 struct lpfc_iocbq rspiocbq;
3979 uint32_t status;
3980 uint32_t portRspPut, portRspMax;
3981 int rc = 1;
3982 lpfc_iocb_type type;
3983 unsigned long iflag;
3984 uint32_t rsp_cmpl = 0;
3985
3986 spin_lock_irqsave(&phba->hbalock, iflag);
3987 pring->stats.iocb_event++;
3988
3989 /*
3990 * The next available response entry should never exceed the maximum
3991 * entries. If it does, treat it as an adapter hardware error.
3992 */
3993 portRspMax = pring->sli.sli3.numRiocb;
3994 portRspPut = le32_to_cpu(pgp->rspPutInx);
3995 if (unlikely(portRspPut >= portRspMax)) {
3996 lpfc_sli_rsp_pointers_error(phba, pring);
3997 spin_unlock_irqrestore(&phba->hbalock, iflag);
3998 return 1;
3999 }
4000 if (phba->fcp_ring_in_use) {
4001 spin_unlock_irqrestore(&phba->hbalock, iflag);
4002 return 1;
4003 } else
4004 phba->fcp_ring_in_use = 1;
4005
4006 rmb();
4007 while (pring->sli.sli3.rspidx != portRspPut) {
4008 /*
4009 * Fetch an entry off the ring and copy it into a local data
4010 * structure. The copy involves a byte-swap since the
4011 * network byte order and pci byte orders are different.
4012 */
4013 entry = lpfc_resp_iocb(phba, pring);
4014 phba->last_completion_time = jiffies;
4015
4016 if (++pring->sli.sli3.rspidx >= portRspMax)
4017 pring->sli.sli3.rspidx = 0;
4018
4019 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4020 (uint32_t *) &rspiocbq.iocb,
4021 phba->iocb_rsp_size);
4022 INIT_LIST_HEAD(&(rspiocbq.list));
4023 irsp = &rspiocbq.iocb;
4024
4025 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4026 pring->stats.iocb_rsp++;
4027 rsp_cmpl++;
4028
4029 if (unlikely(irsp->ulpStatus)) {
4030 /*
4031 * If resource errors reported from HBA, reduce
4032 * queuedepths of the SCSI device.
4033 */
4034 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4035 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4036 IOERR_NO_RESOURCES)) {
4037 spin_unlock_irqrestore(&phba->hbalock, iflag);
4038 phba->lpfc_rampdown_queue_depth(phba);
4039 spin_lock_irqsave(&phba->hbalock, iflag);
4040 }
4041
4042 /* Rsp ring <ringno> error: IOCB */
4043 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4044 "0336 Rsp Ring %d error: IOCB Data: "
4045 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4046 pring->ringno,
4047 irsp->un.ulpWord[0],
4048 irsp->un.ulpWord[1],
4049 irsp->un.ulpWord[2],
4050 irsp->un.ulpWord[3],
4051 irsp->un.ulpWord[4],
4052 irsp->un.ulpWord[5],
4053 *(uint32_t *)&irsp->un1,
4054 *((uint32_t *)&irsp->un1 + 1));
4055 }
4056
4057 switch (type) {
4058 case LPFC_ABORT_IOCB:
4059 case LPFC_SOL_IOCB:
4060 /*
4061 * Idle exchange closed via ABTS from port. No iocb
4062 * resources need to be recovered.
4063 */
4064 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4066 "0333 IOCB cmd 0x%x"
4067 " processed. Skipping"
4068 " completion\n",
4069 irsp->ulpCommand);
4070 break;
4071 }
4072
4073 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4074 &rspiocbq);
4075 if (unlikely(!cmdiocbq))
4076 break;
4077 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4078 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4079 if (cmdiocbq->cmd_cmpl) {
4080 spin_unlock_irqrestore(&phba->hbalock, iflag);
4081 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4082 spin_lock_irqsave(&phba->hbalock, iflag);
4083 }
4084 break;
4085 case LPFC_UNSOL_IOCB:
4086 spin_unlock_irqrestore(&phba->hbalock, iflag);
4087 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4088 spin_lock_irqsave(&phba->hbalock, iflag);
4089 break;
4090 default:
4091 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4092 char adaptermsg[LPFC_MAX_ADPTMSG];
4093 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4094 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4095 MAX_MSG_DATA);
4096 dev_warn(&((phba->pcidev)->dev),
4097 "lpfc%d: %s\n",
4098 phba->brd_no, adaptermsg);
4099 } else {
4100 /* Unknown IOCB command */
4101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4102 "0334 Unknown IOCB command "
4103 "Data: x%x, x%x x%x x%x x%x\n",
4104 type, irsp->ulpCommand,
4105 irsp->ulpStatus,
4106 irsp->ulpIoTag,
4107 irsp->ulpContext);
4108 }
4109 break;
4110 }
4111
4112 /*
4113 * The response IOCB has been processed. Update the ring
4114 * pointer in SLIM. If the port response put pointer has not
4115 * been updated, sync the pgp->rspPutInx and fetch the new port
4116 * response put pointer.
4117 */
4118 writel(pring->sli.sli3.rspidx,
4119 &phba->host_gp[pring->ringno].rspGetInx);
4120
4121 if (pring->sli.sli3.rspidx == portRspPut)
4122 portRspPut = le32_to_cpu(pgp->rspPutInx);
4123 }
4124
4125 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4126 pring->stats.iocb_rsp_full++;
4127 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4128 writel(status, phba->CAregaddr);
4129 readl(phba->CAregaddr);
4130 }
4131 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4132 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4133 pring->stats.iocb_cmd_empty++;
4134
4135 /* Force update of the local copy of cmdGetInx */
4136 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4137 lpfc_sli_resume_iocb(phba, pring);
4138
4139 if ((pring->lpfc_sli_cmd_available))
4140 (pring->lpfc_sli_cmd_available) (phba, pring);
4141
4142 }
4143
4144 phba->fcp_ring_in_use = 0;
4145 spin_unlock_irqrestore(&phba->hbalock, iflag);
4146 return rc;
4147 }
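/*
 * For illustration: the response ring is a classic single-producer,
 * single-consumer circular buffer. The port advances rspPutInx as it
 * posts entries; the driver chases it with rspidx, wrapping at
 * portRspMax, and publishes its progress back through rspGetInx.
 * A condensed sketch of the loop above (host_gp here is shorthand for
 * &phba->host_gp[pring->ringno]):
 *
 *	while (rspidx != portRspPut) {
 *		consume(ring[rspidx]);		// hypothetical
 *		if (++rspidx >= portRspMax)
 *			rspidx = 0;		// wrap
 *		writel(rspidx, &host_gp->rspGetInx);
 *	}
 */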
4148
4149 /**
4150 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4151 * @phba: Pointer to HBA context object.
4152 * @pring: Pointer to driver SLI ring object.
4153 * @rspiocbp: Pointer to driver response IOCB object.
4154 *
4155 * This function is called from the worker thread when there is a slow-path
4156 * response IOCB to process. This function chains all the response iocbs until
4157 * seeing the iocb with the LE bit set. The function will call
4158 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4159 * completion of a command iocb. The function will call the
4160 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4161 * The function frees the resources or calls the completion handler if this
4162 * iocb is an abort completion. The function returns NULL when the response
4163 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4164 * this function shall chain the iocb on to the iocb_continueq and return the
4165 * response iocb passed in.
4166 **/
4167 static struct lpfc_iocbq *
4168 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4169 struct lpfc_iocbq *rspiocbp)
4170 {
4171 struct lpfc_iocbq *saveq;
4172 struct lpfc_iocbq *cmdiocb;
4173 struct lpfc_iocbq *next_iocb;
4174 IOCB_t *irsp;
4175 uint32_t free_saveq;
4176 u8 cmd_type;
4177 lpfc_iocb_type type;
4178 unsigned long iflag;
4179 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4180 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4181 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4182 int rc;
4183
4184 spin_lock_irqsave(&phba->hbalock, iflag);
4185 /* First add the response iocb to the continueq list */
4186 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4187 pring->iocb_continueq_cnt++;
4188
4189 /*
4190 * By default, the driver expects to free all resources
4191 * associated with this iocb completion.
4192 */
4193 free_saveq = 1;
4194 saveq = list_get_first(&pring->iocb_continueq,
4195 struct lpfc_iocbq, list);
4196 list_del_init(&pring->iocb_continueq);
4197 pring->iocb_continueq_cnt = 0;
4198
4199 pring->stats.iocb_rsp++;
4200
4201 /*
4202 * If resource errors reported from HBA, reduce
4203 * queuedepths of the SCSI device.
4204 */
4205 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4206 ((ulp_word4 & IOERR_PARAM_MASK) ==
4207 IOERR_NO_RESOURCES)) {
4208 spin_unlock_irqrestore(&phba->hbalock, iflag);
4209 phba->lpfc_rampdown_queue_depth(phba);
4210 spin_lock_irqsave(&phba->hbalock, iflag);
4211 }
4212
4213 if (ulp_status) {
4214 /* Rsp ring <ringno> error: IOCB */
4215 if (phba->sli_rev < LPFC_SLI_REV4) {
4216 irsp = &rspiocbp->iocb;
4217 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4218 "0328 Rsp Ring %d error: ulp_status x%x "
4219 "IOCB Data: "
4220 "x%08x x%08x x%08x x%08x "
4221 "x%08x x%08x x%08x x%08x "
4222 "x%08x x%08x x%08x x%08x "
4223 "x%08x x%08x x%08x x%08x\n",
4224 pring->ringno, ulp_status,
4225 get_job_ulpword(rspiocbp, 0),
4226 get_job_ulpword(rspiocbp, 1),
4227 get_job_ulpword(rspiocbp, 2),
4228 get_job_ulpword(rspiocbp, 3),
4229 get_job_ulpword(rspiocbp, 4),
4230 get_job_ulpword(rspiocbp, 5),
4231 *(((uint32_t *)irsp) + 6),
4232 *(((uint32_t *)irsp) + 7),
4233 *(((uint32_t *)irsp) + 8),
4234 *(((uint32_t *)irsp) + 9),
4235 *(((uint32_t *)irsp) + 10),
4236 *(((uint32_t *)irsp) + 11),
4237 *(((uint32_t *)irsp) + 12),
4238 *(((uint32_t *)irsp) + 13),
4239 *(((uint32_t *)irsp) + 14),
4240 *(((uint32_t *)irsp) + 15));
4241 } else {
4242 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4243 "0321 Rsp Ring %d error: "
4244 "IOCB Data: "
4245 "x%x x%x x%x x%x\n",
4246 pring->ringno,
4247 rspiocbp->wcqe_cmpl.word0,
4248 rspiocbp->wcqe_cmpl.total_data_placed,
4249 rspiocbp->wcqe_cmpl.parameter,
4250 rspiocbp->wcqe_cmpl.word3);
4251 }
4252 }
4253
4254
4255 /*
4256 * Fetch the iocb command type and call the correct completion
4257 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4258 * get freed back to the lpfc_iocb_list by the discovery
4259 * kernel thread.
4260 */
4261 cmd_type = ulp_command & CMD_IOCB_MASK;
4262 type = lpfc_sli_iocb_cmd_type(cmd_type);
4263 switch (type) {
4264 case LPFC_SOL_IOCB:
4265 spin_unlock_irqrestore(&phba->hbalock, iflag);
4266 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4267 spin_lock_irqsave(&phba->hbalock, iflag);
4268 break;
4269 case LPFC_UNSOL_IOCB:
4270 spin_unlock_irqrestore(&phba->hbalock, iflag);
4271 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4272 spin_lock_irqsave(&phba->hbalock, iflag);
4273 if (!rc)
4274 free_saveq = 0;
4275 break;
4276 case LPFC_ABORT_IOCB:
4277 cmdiocb = NULL;
4278 if (ulp_command != CMD_XRI_ABORTED_CX)
4279 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4280 saveq);
4281 if (cmdiocb) {
4282 /* Call the specified completion routine */
4283 if (cmdiocb->cmd_cmpl) {
4284 spin_unlock_irqrestore(&phba->hbalock, iflag);
4285 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4286 spin_lock_irqsave(&phba->hbalock, iflag);
4287 } else {
4288 __lpfc_sli_release_iocbq(phba, cmdiocb);
4289 }
4290 }
4291 break;
4292 case LPFC_UNKNOWN_IOCB:
4293 if (ulp_command == CMD_ADAPTER_MSG) {
4294 char adaptermsg[LPFC_MAX_ADPTMSG];
4295
4296 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4297 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4298 MAX_MSG_DATA);
4299 dev_warn(&((phba->pcidev)->dev),
4300 "lpfc%d: %s\n",
4301 phba->brd_no, adaptermsg);
4302 } else {
4303 /* Unknown command */
4304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4305 "0335 Unknown IOCB "
4306 "command Data: x%x "
4307 "x%x x%x x%x\n",
4308 ulp_command,
4309 ulp_status,
4310 get_wqe_reqtag(rspiocbp),
4311 get_job_ulpcontext(phba, rspiocbp));
4312 }
4313 break;
4314 }
4315
4316 if (free_saveq) {
4317 list_for_each_entry_safe(rspiocbp, next_iocb,
4318 &saveq->list, list) {
4319 list_del_init(&rspiocbp->list);
4320 __lpfc_sli_release_iocbq(phba, rspiocbp);
4321 }
4322 __lpfc_sli_release_iocbq(phba, saveq);
4323 }
4324 rspiocbp = NULL;
4325 spin_unlock_irqrestore(&phba->hbalock, iflag);
4326 return rspiocbp;
4327 }
4328
4329 /**
4330 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4331 * @phba: Pointer to HBA context object.
4332 * @pring: Pointer to driver SLI ring object.
4333 * @mask: Host attention register mask for this ring.
4334 *
4335 * This routine wraps the actual slow_ring event process routine from the
4336 * API jump table function pointer from the lpfc_hba struct.
4337 **/
4338 void
4339 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4340 struct lpfc_sli_ring *pring, uint32_t mask)
4341 {
4342 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4343 }
4344
4345 /**
4346 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4347 * @phba: Pointer to HBA context object.
4348 * @pring: Pointer to driver SLI ring object.
4349 * @mask: Host attention register mask for this ring.
4350 *
4351 * This function is called from the worker thread when there is a ring event
4352 * for non-fcp rings. The caller does not hold any lock. The function will
4353 * remove each response iocb in the response ring and calls the handle
4354 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4355 **/
4356 static void
4357 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4358 struct lpfc_sli_ring *pring, uint32_t mask)
4359 {
4360 struct lpfc_pgp *pgp;
4361 IOCB_t *entry;
4362 IOCB_t *irsp = NULL;
4363 struct lpfc_iocbq *rspiocbp = NULL;
4364 uint32_t portRspPut, portRspMax;
4365 unsigned long iflag;
4366 uint32_t status;
4367
4368 pgp = &phba->port_gp[pring->ringno];
4369 spin_lock_irqsave(&phba->hbalock, iflag);
4370 pring->stats.iocb_event++;
4371
4372 /*
4373 * The next available response entry should never exceed the maximum
4374 * entries. If it does, treat it as an adapter hardware error.
4375 */
4376 portRspMax = pring->sli.sli3.numRiocb;
4377 portRspPut = le32_to_cpu(pgp->rspPutInx);
4378 if (portRspPut >= portRspMax) {
4379 /*
4380 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4381 * rsp ring <portRspMax>
4382 */
4383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4384 "0303 Ring %d handler: portRspPut %d "
4385 "is bigger than rsp ring %d\n",
4386 pring->ringno, portRspPut, portRspMax);
4387
4388 phba->link_state = LPFC_HBA_ERROR;
4389 spin_unlock_irqrestore(&phba->hbalock, iflag);
4390
4391 phba->work_hs = HS_FFER3;
4392 lpfc_handle_eratt(phba);
4393
4394 return;
4395 }
4396
4397 rmb();
4398 while (pring->sli.sli3.rspidx != portRspPut) {
4399 /*
4400 * Build a completion list and call the appropriate handler.
4401 * The process is to get the next available response iocb, get
4402 * a free iocb from the list, copy the response data into the
4403 * free iocb, insert to the continuation list, and update the
4404 * next response index to slim. This process makes response
4405 * iocb's in the ring available to DMA as fast as possible but
4406 * pays a penalty for a copy operation. Since the iocb is
4407 * only 32 bytes, this penalty is considered small relative to
4408 * the PCI reads for register values and a slim write. When
4409 * the ulpLe field is set, the entire Command has been
4410 * received.
4411 */
4412 entry = lpfc_resp_iocb(phba, pring);
4413
4414 phba->last_completion_time = jiffies;
4415 rspiocbp = __lpfc_sli_get_iocbq(phba);
4416 if (rspiocbp == NULL) {
4417 printk(KERN_ERR "%s: out of buffers! Failing "
4418 "completion.\n", __func__);
4419 break;
4420 }
4421
4422 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4423 phba->iocb_rsp_size);
4424 irsp = &rspiocbp->iocb;
4425
4426 if (++pring->sli.sli3.rspidx >= portRspMax)
4427 pring->sli.sli3.rspidx = 0;
4428
4429 if (pring->ringno == LPFC_ELS_RING) {
4430 lpfc_debugfs_slow_ring_trc(phba,
4431 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4432 *(((uint32_t *) irsp) + 4),
4433 *(((uint32_t *) irsp) + 6),
4434 *(((uint32_t *) irsp) + 7));
4435 }
4436
4437 writel(pring->sli.sli3.rspidx,
4438 &phba->host_gp[pring->ringno].rspGetInx);
4439
4440 spin_unlock_irqrestore(&phba->hbalock, iflag);
4441 /* Handle the response IOCB */
4442 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4443 spin_lock_irqsave(&phba->hbalock, iflag);
4444
4445 /*
4446 * If the port response put pointer has not been updated, sync
4447 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4448 * response put pointer.
4449 */
4450 if (pring->sli.sli3.rspidx == portRspPut) {
4451 portRspPut = le32_to_cpu(pgp->rspPutInx);
4452 }
4453 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4454
4455 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4456 /* At least one response entry has been freed */
4457 pring->stats.iocb_rsp_full++;
4458 /* SET RxRE_RSP in Chip Att register */
4459 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4460 writel(status, phba->CAregaddr);
4461 readl(phba->CAregaddr); /* flush */
4462 }
4463 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4464 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4465 pring->stats.iocb_cmd_empty++;
4466
4467 /* Force update of the local copy of cmdGetInx */
4468 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4469 lpfc_sli_resume_iocb(phba, pring);
4470
4471 if ((pring->lpfc_sli_cmd_available))
4472 (pring->lpfc_sli_cmd_available) (phba, pring);
4473
4474 }
4475
4476 spin_unlock_irqrestore(&phba->hbalock, iflag);
4477 return;
4478 }
4479
4480 /**
4481 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4482 * @phba: Pointer to HBA context object.
4483 * @pring: Pointer to driver SLI ring object.
4484 * @mask: Host attention register mask for this ring.
4485 *
4486 * This function is called from the worker thread when there is a pending
4487 * ELS response iocb on the driver internal slow-path response iocb worker
4488 * queue. The caller does not hold any lock. The function will remove each
4489 * response iocb from the response worker queue and calls the handle
4490 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4491 **/
4492 static void
4493 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4494 struct lpfc_sli_ring *pring, uint32_t mask)
4495 {
4496 struct lpfc_iocbq *irspiocbq;
4497 struct hbq_dmabuf *dmabuf;
4498 struct lpfc_cq_event *cq_event;
4499 unsigned long iflag;
4500 int count = 0;
4501
4502 spin_lock_irqsave(&phba->hbalock, iflag);
4503 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4504 spin_unlock_irqrestore(&phba->hbalock, iflag);
4505 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4506 /* Get the response iocb from the head of work queue */
4507 spin_lock_irqsave(&phba->hbalock, iflag);
4508 list_remove_head(&phba->sli4_hba.sp_queue_event,
4509 cq_event, struct lpfc_cq_event, list);
4510 spin_unlock_irqrestore(&phba->hbalock, iflag);
4511
4512 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4513 case CQE_CODE_COMPL_WQE:
4514 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4515 cq_event);
4516 /* Translate ELS WCQE to response IOCBQ */
4517 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4518 irspiocbq);
4519 if (irspiocbq)
4520 lpfc_sli_sp_handle_rspiocb(phba, pring,
4521 irspiocbq);
4522 count++;
4523 break;
4524 case CQE_CODE_RECEIVE:
4525 case CQE_CODE_RECEIVE_V1:
4526 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4527 cq_event);
4528 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4529 count++;
4530 break;
4531 default:
4532 break;
4533 }
4534
4535 /* Limit the number of events to 64 to avoid soft lockups */
4536 if (count == 64)
4537 break;
4538 }
4539 }
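/*
 * For illustration: the count check above is a bounded-drain guard so
 * a flood of queue events cannot monopolize the worker thread. Generic
 * shape of the idiom, with hypothetical helpers:
 *
 *	int budget = 64;
 *
 *	while (budget-- && !list_empty(&queue))
 *		handle(pop(&queue));
 *	// leftovers are drained on the next worker invocation
 */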
4540
4541 /**
4542 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4543 * @phba: Pointer to HBA context object.
4544 * @pring: Pointer to driver SLI ring object.
4545 *
4546 * This function aborts all iocbs in the given ring and frees all the iocb
4547 * objects in txq. This function issues an abort iocb for all the iocb commands
4548 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4549 * the return of this function. The caller is not required to hold any locks.
4550 **/
4551 void
4552 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4553 {
4554 LIST_HEAD(tx_completions);
4555 LIST_HEAD(txcmplq_completions);
4556 struct lpfc_iocbq *iocb, *next_iocb;
4557 int offline;
4558
4559 if (pring->ringno == LPFC_ELS_RING) {
4560 lpfc_fabric_abort_hba(phba);
4561 }
4562 offline = pci_channel_offline(phba->pcidev);
4563
4564 /* Error everything on txq and txcmplq
4565 * First do the txq.
4566 */
4567 if (phba->sli_rev >= LPFC_SLI_REV4) {
4568 spin_lock_irq(&pring->ring_lock);
4569 list_splice_init(&pring->txq, &tx_completions);
4570 pring->txq_cnt = 0;
4571
4572 if (offline) {
4573 list_splice_init(&pring->txcmplq,
4574 &txcmplq_completions);
4575 } else {
4576 /* Next issue ABTS for everything on the txcmplq */
4577 list_for_each_entry_safe(iocb, next_iocb,
4578 &pring->txcmplq, list)
4579 lpfc_sli_issue_abort_iotag(phba, pring,
4580 iocb, NULL);
4581 }
4582 spin_unlock_irq(&pring->ring_lock);
4583 } else {
4584 spin_lock_irq(&phba->hbalock);
4585 list_splice_init(&pring->txq, &tx_completions);
4586 pring->txq_cnt = 0;
4587
4588 if (offline) {
4589 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4590 } else {
4591 /* Next issue ABTS for everything on the txcmplq */
4592 list_for_each_entry_safe(iocb, next_iocb,
4593 &pring->txcmplq, list)
4594 lpfc_sli_issue_abort_iotag(phba, pring,
4595 iocb, NULL);
4596 }
4597 spin_unlock_irq(&phba->hbalock);
4598 }
4599
4600 if (offline) {
4601 /* Cancel all the IOCBs from the completions list */
4602 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4603 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4604 } else {
4605 /* Make sure HBA is alive */
4606 lpfc_issue_hb_tmo(phba);
4607 }
4608 /* Cancel all the IOCBs from the completions list */
4609 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4610 IOERR_SLI_ABORTED);
4611 }
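/*
 * For illustration: the offline test above selects between two
 * recovery modes. With the PCI channel online, txcmplq entries get an
 * ABTS and complete asynchronously; offline, they are spliced off and
 * failed immediately since the hardware can no longer respond:
 *
 *	if (pci_channel_offline(phba->pcidev))
 *		fail_now(&txcmplq);	// hypothetical: cancel locally
 *	else
 *		abort_each(&txcmplq);	// hypothetical: issue ABTS
 */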
4612
4613 /**
4614 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4615 * @phba: Pointer to HBA context object.
4616 *
4617 * This function aborts all iocbs in FCP rings and frees all the iocb
4618 * objects in txq. This function issues an abort iocb for all the iocb commands
4619 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4620 * the return of this function. The caller is not required to hold any locks.
4621 **/
4622 void
4623 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4624 {
4625 struct lpfc_sli *psli = &phba->sli;
4626 struct lpfc_sli_ring *pring;
4627 uint32_t i;
4628
4629 /* Look on all the FCP Rings for the iotag */
4630 if (phba->sli_rev >= LPFC_SLI_REV4) {
4631 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4632 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4633 lpfc_sli_abort_iocb_ring(phba, pring);
4634 }
4635 } else {
4636 pring = &psli->sli3_ring[LPFC_FCP_RING];
4637 lpfc_sli_abort_iocb_ring(phba, pring);
4638 }
4639 }
4640
4641 /**
4642 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4643 * @phba: Pointer to HBA context object.
4644 *
4645 * This function flushes all iocbs in the IO ring and frees all the iocb
4646 * objects in txq and txcmplq. This function will not issue abort iocbs
4647 * for the iocb commands in txcmplq; they will just be returned with
4648 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4649 * slot has been permanently disabled.
4650 **/
4651 void
4652 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4653 {
4654 LIST_HEAD(txq);
4655 LIST_HEAD(txcmplq);
4656 struct lpfc_sli *psli = &phba->sli;
4657 struct lpfc_sli_ring *pring;
4658 uint32_t i;
4659 struct lpfc_iocbq *piocb, *next_iocb;
4660
4661 spin_lock_irq(&phba->hbalock);
4662 /* Indicate the I/O queues are flushed */
4663 phba->hba_flag |= HBA_IOQ_FLUSH;
4664 spin_unlock_irq(&phba->hbalock);
4665
4666 /* Look on all the FCP Rings for the iotag */
4667 if (phba->sli_rev >= LPFC_SLI_REV4) {
4668 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4669 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4670
4671 spin_lock_irq(&pring->ring_lock);
4672 /* Retrieve everything on txq */
4673 list_splice_init(&pring->txq, &txq);
4674 list_for_each_entry_safe(piocb, next_iocb,
4675 &pring->txcmplq, list)
4676 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4677 /* Retrieve everything on the txcmplq */
4678 list_splice_init(&pring->txcmplq, &txcmplq);
4679 pring->txq_cnt = 0;
4680 pring->txcmplq_cnt = 0;
4681 spin_unlock_irq(&pring->ring_lock);
4682
4683 /* Flush the txq */
4684 lpfc_sli_cancel_iocbs(phba, &txq,
4685 IOSTAT_LOCAL_REJECT,
4686 IOERR_SLI_DOWN);
4687 /* Flush the txcmplq */
4688 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4689 IOSTAT_LOCAL_REJECT,
4690 IOERR_SLI_DOWN);
4691 if (unlikely(pci_channel_offline(phba->pcidev)))
4692 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4693 }
4694 } else {
4695 pring = &psli->sli3_ring[LPFC_FCP_RING];
4696
4697 spin_lock_irq(&phba->hbalock);
4698 /* Retrieve everything on txq */
4699 list_splice_init(&pring->txq, &txq);
4700 list_for_each_entry_safe(piocb, next_iocb,
4701 &pring->txcmplq, list)
4702 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4703 /* Retrieve everything on the txcmplq */
4704 list_splice_init(&pring->txcmplq, &txcmplq);
4705 pring->txq_cnt = 0;
4706 pring->txcmplq_cnt = 0;
4707 spin_unlock_irq(&phba->hbalock);
4708
4709 /* Flush the txq */
4710 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4711 IOERR_SLI_DOWN);
4712 /* Flush the txcmplq */
4713 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4714 IOERR_SLI_DOWN);
4715 }
4716 }
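/*
 * For illustration: the flush above uses the standard splice-and-drain
 * idiom -- move the whole list off the shared ring under the lock,
 * then complete the entries without holding it:
 *
 *	LIST_HEAD(local);
 *
 *	spin_lock_irq(&lock);
 *	list_splice_init(&shared, &local);	// shared is now empty
 *	spin_unlock_irq(&lock);
 *	// walk 'local' lock-free and fail each entry
 */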
4717
4718 /**
4719 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4720 * @phba: Pointer to HBA context object.
4721 * @mask: Bit mask to be checked.
4722 *
4723 * This function reads the host status register and compares
4724 * with the provided bit mask to check if HBA completed
4725 * the restart. This function will wait in a loop for the
4726 * HBA to complete the restart. If the HBA does not restart within
4727 * 15 iterations, the function will reset the HBA again. The
4728 * function returns 1 when the HBA fails to restart; otherwise it
4729 * returns zero.
4730 **/
4731 static int
4732 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4733 {
4734 uint32_t status;
4735 int i = 0;
4736 int retval = 0;
4737
4738 /* Read the HBA Host Status Register */
4739 if (lpfc_readl(phba->HSregaddr, &status))
4740 return 1;
4741
4742 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4743
4744 /*
4745 * Check status register every 10ms for 5 retries, then every
4746 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4747 * every 2.5 sec for 5 more.
4748 * Break out of the loop if errors occurred during init.
4749 */
4750 while (((status & mask) != mask) &&
4751 !(status & HS_FFERM) &&
4752 i++ < 20) {
4753
4754 if (i <= 5)
4755 msleep(10);
4756 else if (i <= 10)
4757 msleep(500);
4758 else
4759 msleep(2500);
4760
4761 if (i == 15) {
4762 /* Do post */
4763 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4764 lpfc_sli_brdrestart(phba);
4765 }
4766 /* Read the HBA Host Status Register */
4767 if (lpfc_readl(phba->HSregaddr, &status)) {
4768 retval = 1;
4769 break;
4770 }
4771 }
4772
4773 /* Check to see if any errors occurred during init */
4774 if ((status & HS_FFERM) || (i >= 20)) {
4775 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4776 "2751 Adapter failed to restart, "
4777 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4778 status,
4779 readl(phba->MBslimaddr + 0xa8),
4780 readl(phba->MBslimaddr + 0xac));
4781 phba->link_state = LPFC_HBA_ERROR;
4782 retval = 1;
4783 }
4784
4785 return retval;
4786 }
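/*
 * For illustration: the wait schedule above works out to roughly
 *
 *	 5 iterations *   10 ms =    50 ms
 *	 5 iterations *  500 ms =  2500 ms
 *	10 iterations * 2500 ms = 25000 ms
 *
 * i.e. ~27.5 seconds before giving up, with one board restart kicked
 * at the 15th iteration.
 */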
4787
4788 /**
4789 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4790 * @phba: Pointer to HBA context object.
4791 * @mask: Bit mask to be checked.
4792 *
4793 * This function checks the host status register to see if the HBA is
4794 * ready. This function will wait in a loop for the HBA to be ready.
4795 * If the HBA is not ready, the function will reset the HBA PCI
4796 * function again. The function returns 1 when the HBA fails to become
4797 * ready; otherwise it returns zero.
4798 **/
4799 static int
4800 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4801 {
4802 uint32_t status;
4803 int retval = 0;
4804
4805 /* Read the HBA Host Status Register */
4806 status = lpfc_sli4_post_status_check(phba);
4807
4808 if (status) {
4809 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4810 lpfc_sli_brdrestart(phba);
4811 status = lpfc_sli4_post_status_check(phba);
4812 }
4813
4814 /* Check to see if any errors occurred during init */
4815 if (status) {
4816 phba->link_state = LPFC_HBA_ERROR;
4817 retval = 1;
4818 } else
4819 phba->sli4_hba.intr_enable = 0;
4820
4821 phba->hba_flag &= ~HBA_SETUP;
4822 return retval;
4823 }
4824
4825 /**
4826 * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
4827 * @phba: Pointer to HBA context object.
4828 * @mask: Bit mask to be checked.
4829 *
4830 * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine
4831 * from the API jump table function pointer from the lpfc_hba struct.
4832 **/
4833 int
4834 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4835 {
4836 return phba->lpfc_sli_brdready(phba, mask);
4837 }
4838
4839 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4840
4841 /**
4842 * lpfc_reset_barrier - Make HBA ready for HBA reset
4843 * @phba: Pointer to HBA context object.
4844 *
4845 * This function is called before resetting an HBA. This function is called
4846 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4847 **/
4848 void lpfc_reset_barrier(struct lpfc_hba *phba)
4849 {
4850 uint32_t __iomem *resp_buf;
4851 uint32_t __iomem *mbox_buf;
4852 volatile struct MAILBOX_word0 mbox;
4853 uint32_t hc_copy, ha_copy, resp_data;
4854 int i;
4855 uint8_t hdrtype;
4856
4857 lockdep_assert_held(&phba->hbalock);
4858
4859 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4860 if (hdrtype != 0x80 ||
4861 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4862 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4863 return;
4864
4865 /*
4866 * Tell the other part of the chip to suspend temporarily all
4867 * its DMA activity.
4868 */
4869 resp_buf = phba->MBslimaddr;
4870
4871 /* Disable the error attention */
4872 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4873 return;
4874 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4875 readl(phba->HCregaddr); /* flush */
4876 phba->link_flag |= LS_IGNORE_ERATT;
4877
4878 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4879 return;
4880 if (ha_copy & HA_ERATT) {
4881 /* Clear Chip error bit */
4882 writel(HA_ERATT, phba->HAregaddr);
4883 phba->pport->stopped = 1;
4884 }
4885
4886 mbox.word0 = 0;
4887 mbox.mbxCommand = MBX_KILL_BOARD;
4888 mbox.mbxOwner = OWN_CHIP;
4889
4890 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4891 mbox_buf = phba->MBslimaddr;
4892 writel(mbox.word0, mbox_buf);
4893
4894 for (i = 0; i < 50; i++) {
4895 if (lpfc_readl((resp_buf + 1), &resp_data))
4896 return;
4897 if (resp_data != ~(BARRIER_TEST_PATTERN))
4898 mdelay(1);
4899 else
4900 break;
4901 }
4902 resp_data = 0;
4903 if (lpfc_readl((resp_buf + 1), &resp_data))
4904 return;
4905 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4906 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4907 phba->pport->stopped)
4908 goto restore_hc;
4909 else
4910 goto clear_errat;
4911 }
4912
4913 mbox.mbxOwner = OWN_HOST;
4914 resp_data = 0;
4915 for (i = 0; i < 500; i++) {
4916 if (lpfc_readl(resp_buf, &resp_data))
4917 return;
4918 if (resp_data != mbox.word0)
4919 mdelay(1);
4920 else
4921 break;
4922 }
4923
4924 clear_errat:
4925
4926 while (++i < 500) {
4927 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4928 return;
4929 if (!(ha_copy & HA_ERATT))
4930 mdelay(1);
4931 else
4932 break;
4933 }
4934
4935 if (readl(phba->HAregaddr) & HA_ERATT) {
4936 writel(HA_ERATT, phba->HAregaddr);
4937 phba->pport->stopped = 1;
4938 }
4939
4940 restore_hc:
4941 phba->link_flag &= ~LS_IGNORE_ERATT;
4942 writel(hc_copy, phba->HCregaddr);
4943 readl(phba->HCregaddr); /* flush */
4944 }
4945
4946 /**
4947 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4948 * @phba: Pointer to HBA context object.
4949 *
4950 * This function issues a kill_board mailbox command and waits for
4951 * the error attention interrupt. This function is called for stopping
4952 * the firmware processing. The caller is not required to hold any
4953 * locks. This function calls lpfc_hba_down_post function to free
4954 * any pending commands after the kill. The function returns 1 if it
4955 * fails to kill the board, else 0.
4956 **/
4957 int
4958 lpfc_sli_brdkill(struct lpfc_hba *phba)
4959 {
4960 struct lpfc_sli *psli;
4961 LPFC_MBOXQ_t *pmb;
4962 uint32_t status;
4963 uint32_t ha_copy;
4964 int retval;
4965 int i = 0;
4966
4967 psli = &phba->sli;
4968
4969 /* Kill HBA */
4970 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4971 "0329 Kill HBA Data: x%x x%x\n",
4972 phba->pport->port_state, psli->sli_flag);
4973
4974 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4975 if (!pmb)
4976 return 1;
4977
4978 /* Disable the error attention */
4979 spin_lock_irq(&phba->hbalock);
4980 if (lpfc_readl(phba->HCregaddr, &status)) {
4981 spin_unlock_irq(&phba->hbalock);
4982 mempool_free(pmb, phba->mbox_mem_pool);
4983 return 1;
4984 }
4985 status &= ~HC_ERINT_ENA;
4986 writel(status, phba->HCregaddr);
4987 readl(phba->HCregaddr); /* flush */
4988 phba->link_flag |= LS_IGNORE_ERATT;
4989 spin_unlock_irq(&phba->hbalock);
4990
4991 lpfc_kill_board(phba, pmb);
4992 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4993 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4994
4995 if (retval != MBX_SUCCESS) {
4996 if (retval != MBX_BUSY)
4997 mempool_free(pmb, phba->mbox_mem_pool);
4998 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4999 "2752 KILL_BOARD command failed retval %d\n",
5000 retval);
5001 spin_lock_irq(&phba->hbalock);
5002 phba->link_flag &= ~LS_IGNORE_ERATT;
5003 spin_unlock_irq(&phba->hbalock);
5004 return 1;
5005 }
5006
5007 spin_lock_irq(&phba->hbalock);
5008 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5009 spin_unlock_irq(&phba->hbalock);
5010
5011 mempool_free(pmb, phba->mbox_mem_pool);
5012
5013 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5014 * attention every 100ms for 3 seconds. If we don't get ERATT after
5015 * 3 seconds we still set HBA_ERROR state because the status of the
5016 * board is now undefined.
5017 */
5018 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5019 return 1;
5020 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5021 mdelay(100);
5022 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5023 return 1;
5024 }
5025
5026 del_timer_sync(&psli->mbox_tmo);
5027 if (ha_copy & HA_ERATT) {
5028 writel(HA_ERATT, phba->HAregaddr);
5029 phba->pport->stopped = 1;
5030 }
5031 spin_lock_irq(&phba->hbalock);
5032 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5033 psli->mbox_active = NULL;
5034 phba->link_flag &= ~LS_IGNORE_ERATT;
5035 spin_unlock_irq(&phba->hbalock);
5036
5037 lpfc_hba_down_post(phba);
5038 phba->link_state = LPFC_HBA_ERROR;
5039
5040 return ha_copy & HA_ERATT ? 0 : 1;
5041 }
5042
5043 /**
5044 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5045 * @phba: Pointer to HBA context object.
5046 *
5047 * This function resets the HBA by writing HC_INITFF to the control
5048 * register. After the HBA resets, this function resets all the iocb ring
5049 * indices. This function disables PCI layer parity checking during
5050 * the reset.
5051 * This function returns 0 always.
5052 * The caller is not required to hold any locks.
5053 **/
5054 int
5055 lpfc_sli_brdreset(struct lpfc_hba *phba)
5056 {
5057 struct lpfc_sli *psli;
5058 struct lpfc_sli_ring *pring;
5059 uint16_t cfg_value;
5060 int i;
5061
5062 psli = &phba->sli;
5063
5064 /* Reset HBA */
5065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5066 "0325 Reset HBA Data: x%x x%x\n",
5067 (phba->pport) ? phba->pport->port_state : 0,
5068 psli->sli_flag);
5069
5070 /* perform board reset */
5071 phba->fc_eventTag = 0;
5072 phba->link_events = 0;
5073 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5074 if (phba->pport) {
5075 phba->pport->fc_myDID = 0;
5076 phba->pport->fc_prevDID = 0;
5077 }
5078
5079 /* Turn off parity checking and serr during the physical reset */
5080 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5081 return -EIO;
5082
5083 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5084 (cfg_value &
5085 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5086
5087 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5088
5089 /* Now toggle INITFF bit in the Host Control Register */
5090 writel(HC_INITFF, phba->HCregaddr);
5091 mdelay(1);
5092 readl(phba->HCregaddr); /* flush */
5093 writel(0, phba->HCregaddr);
5094 readl(phba->HCregaddr); /* flush */
5095
5096 /* Restore PCI cmd register */
5097 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5098
5099 /* Initialize relevant SLI info */
5100 for (i = 0; i < psli->num_rings; i++) {
5101 pring = &psli->sli3_ring[i];
5102 pring->flag = 0;
5103 pring->sli.sli3.rspidx = 0;
5104 pring->sli.sli3.next_cmdidx = 0;
5105 pring->sli.sli3.local_getidx = 0;
5106 pring->sli.sli3.cmdidx = 0;
5107 pring->missbufcnt = 0;
5108 }
5109
5110 phba->link_state = LPFC_WARM_START;
5111 return 0;
5112 }
5113
5114 /**
5115 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5116 * @phba: Pointer to HBA context object.
5117 *
5118 * This function resets a SLI4 HBA. It disables PCI layer parity
5119 * checking while it resets the device. The caller is not required to hold
5120 * any locks.
5121 *
5122 * This function returns 0 on success else returns negative error code.
5123 **/
5124 int
5125 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5126 {
5127 struct lpfc_sli *psli = &phba->sli;
5128 uint16_t cfg_value;
5129 int rc = 0;
5130
5131 /* Reset HBA */
5132 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5133 "0295 Reset HBA Data: x%x x%x x%x\n",
5134 phba->pport->port_state, psli->sli_flag,
5135 phba->hba_flag);
5136
5137 /* perform board reset */
5138 phba->fc_eventTag = 0;
5139 phba->link_events = 0;
5140 phba->pport->fc_myDID = 0;
5141 phba->pport->fc_prevDID = 0;
5142 phba->hba_flag &= ~HBA_SETUP;
5143
5144 spin_lock_irq(&phba->hbalock);
5145 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5146 phba->fcf.fcf_flag = 0;
5147 spin_unlock_irq(&phba->hbalock);
5148
5149 /* Now physically reset the device */
5150 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5151 "0389 Performing PCI function reset!\n");
5152
5153 /* Turn off parity checking and serr during the physical reset */
5154 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5155 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5156 "3205 PCI read Config failed\n");
5157 return -EIO;
5158 }
5159
5160 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5161 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5162
5163 /* Perform FCoE PCI function reset before freeing queue memory */
5164 rc = lpfc_pci_function_reset(phba);
5165
5166 /* Restore PCI cmd register */
5167 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5168
5169 return rc;
5170 }
5171
5172 /**
5173 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5174 * @phba: Pointer to HBA context object.
5175 *
5176 * This function is called in the SLI initialization code path to
5177 * restart the HBA. The caller is not required to hold any lock.
5178 * This function writes MBX_RESTART mailbox command to the SLIM and
5179 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5180 * function to free any pending commands. The function enables
5181 * POST only during the first initialization. The function returns zero.
5182 * The function does not guarantee completion of MBX_RESTART mailbox
5183 * command before the return of this function.
5184 **/
5185 static int
5186 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5187 {
5188 volatile struct MAILBOX_word0 mb;
5189 struct lpfc_sli *psli;
5190 void __iomem *to_slim;
5191 uint32_t hba_aer_enabled;
5192
5193 spin_lock_irq(&phba->hbalock);
5194
5195 /* Take PCIe device Advanced Error Reporting (AER) state */
5196 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5197
5198 psli = &phba->sli;
5199
5200 /* Restart HBA */
5201 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5202 "0337 Restart HBA Data: x%x x%x\n",
5203 (phba->pport) ? phba->pport->port_state : 0,
5204 psli->sli_flag);
5205
5206 mb.word0 = 0;
5207 mb.mbxCommand = MBX_RESTART;
5208 mb.mbxHc = 1;
5209
5210 lpfc_reset_barrier(phba);
5211
5212 to_slim = phba->MBslimaddr;
5213 writel(mb.word0, to_slim);
5214 readl(to_slim); /* flush */
5215
5216 /* Only skip post after fc_ffinit is completed */
5217 if (phba->pport && phba->pport->port_state)
5218 mb.word0 = 1; /* This is really setting up word1 */
5219 else
5220 mb.word0 = 0; /* This is really setting up word1 */
5221 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5222 writel(mb.word0, to_slim);
5223 readl(to_slim); /* flush */
5224
5225 lpfc_sli_brdreset(phba);
5226 if (phba->pport)
5227 phba->pport->stopped = 0;
5228 phba->link_state = LPFC_INIT_START;
5229 phba->hba_flag = 0;
5230 spin_unlock_irq(&phba->hbalock);
5231
5232 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5233 psli->stats_start = ktime_get_seconds();
5234
5235 /* Give the INITFF and Post time to settle. */
5236 mdelay(100);
5237
5238 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5239 if (hba_aer_enabled)
5240 pci_disable_pcie_error_reporting(phba->pcidev);
5241
5242 lpfc_hba_down_post(phba);
5243
5244 return 0;
5245 }
5246
5247 /**
5248 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5249 * @phba: Pointer to HBA context object.
5250 *
5251 * This function is called in the SLI initialization code path to restart
5252 * a SLI4 HBA. The caller is not required to hold any lock.
5253 * At the end of the function, it calls lpfc_hba_down_post function to
5254 * free any pending commands.
5255 **/
5256 static int
5257 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5258 {
5259 struct lpfc_sli *psli = &phba->sli;
5260 uint32_t hba_aer_enabled;
5261 int rc;
5262
5263 /* Restart HBA */
5264 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5265 "0296 Restart HBA Data: x%x x%x\n",
5266 phba->pport->port_state, psli->sli_flag);
5267
5268 /* Take PCIe device Advanced Error Reporting (AER) state */
5269 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5270
5271 rc = lpfc_sli4_brdreset(phba);
5272 if (rc) {
5273 phba->link_state = LPFC_HBA_ERROR;
5274 goto hba_down_queue;
5275 }
5276
5277 spin_lock_irq(&phba->hbalock);
5278 phba->pport->stopped = 0;
5279 phba->link_state = LPFC_INIT_START;
5280 phba->hba_flag = 0;
5281 /* Preserve FA-PWWN expectation */
5282 phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5283 spin_unlock_irq(&phba->hbalock);
5284
5285 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5286 psli->stats_start = ktime_get_seconds();
5287
5288 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5289 if (hba_aer_enabled)
5290 pci_disable_pcie_error_reporting(phba->pcidev);
5291
5292 hba_down_queue:
5293 lpfc_hba_down_post(phba);
5294 lpfc_sli4_queue_destroy(phba);
5295
5296 return rc;
5297 }
5298
5299 /**
5300 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5301 * @phba: Pointer to HBA context object.
5302 *
5303 * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
5304 * through the API jump table function pointer in the lpfc_hba struct.
5305 **/
5306 int
5307 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5308 {
5309 return phba->lpfc_sli_brdrestart(phba);
5310 }
5311
5312 /**
5313 * lpfc_sli_chipset_init - Wait for the HBA to come ready after a restart
5314 * @phba: Pointer to HBA context object.
5315 *
5316 * This function is called after a HBA restart to wait for successful
5317 * restart of the HBA. Successful restart of the HBA is indicated by
5318 * HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after 150
5319 * polling iterations, the function restarts the HBA once more. The function
5320 * returns zero if the HBA restarted successfully, else a negative error code.
5321 **/
5322 int
5323 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5324 {
5325 uint32_t status, i = 0;
5326
5327 /* Read the HBA Host Status Register */
5328 if (lpfc_readl(phba->HSregaddr, &status))
5329 return -EIO;
5330
5331 /* Check status register to see what current state is */
5332 i = 0;
5333 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5334
5335 /* Check every 10ms for 10 retries, then every 100ms for 90
5336 * retries, then every 1 sec for 50 retries (~60 seconds in
5337 * total) before resetting the board once more and checking
5338 * every 1 sec for another 50 retries. Up to 60 seconds may be
5339 * needed for Falcon FIPS zeroization to complete, and any board
5340 * reset in between restarts the zeroization and further delays
5341 * board readiness.
5342 */
5343 if (i++ >= 200) {
5344 /* Adapter failed to init, timeout, status reg
5345 <status> */
5346 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5347 "0436 Adapter failed to init, "
5348 "timeout, status reg x%x, "
5349 "FW Data: A8 x%x AC x%x\n", status,
5350 readl(phba->MBslimaddr + 0xa8),
5351 readl(phba->MBslimaddr + 0xac));
5352 phba->link_state = LPFC_HBA_ERROR;
5353 return -ETIMEDOUT;
5354 }
5355
5356 /* Check to see if any errors occurred during init */
5357 if (status & HS_FFERM) {
5358 /* ERROR: During chipset initialization */
5359 /* Adapter failed to init, chipset, status reg
5360 <status> */
5361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5362 "0437 Adapter failed to init, "
5363 "chipset, status reg x%x, "
5364 "FW Data: A8 x%x AC x%x\n", status,
5365 readl(phba->MBslimaddr + 0xa8),
5366 readl(phba->MBslimaddr + 0xac));
5367 phba->link_state = LPFC_HBA_ERROR;
5368 return -EIO;
5369 }
5370
5371 if (i <= 10)
5372 msleep(10);
5373 else if (i <= 100)
5374 msleep(100);
5375 else
5376 msleep(1000);
5377
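/* At iteration 150 (roughly 60 seconds in), restart the board once;
 * polling then continues up to the 200-iteration limit above.
 */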
5378 if (i == 150) {
5379 /* Do post */
5380 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5381 lpfc_sli_brdrestart(phba);
5382 }
5383 /* Read the HBA Host Status Register */
5384 if (lpfc_readl(phba->HSregaddr, &status))
5385 return -EIO;
5386 }
5387
5388 /* Check to see if any errors occurred during init */
5389 if (status & HS_FFERM) {
5390 /* ERROR: During chipset initialization */
5391 /* Adapter failed to init, chipset, status reg <status> */
5392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5393 "0438 Adapter failed to init, chipset, "
5394 "status reg x%x, "
5395 "FW Data: A8 x%x AC x%x\n", status,
5396 readl(phba->MBslimaddr + 0xa8),
5397 readl(phba->MBslimaddr + 0xac));
5398 phba->link_state = LPFC_HBA_ERROR;
5399 return -EIO;
5400 }
5401
5402 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5403
5404 /* Clear all interrupt enable conditions */
5405 writel(0, phba->HCregaddr);
5406 readl(phba->HCregaddr); /* flush */
5407
5408 /* setup host attn register */
5409 writel(0xffffffff, phba->HAregaddr);
5410 readl(phba->HAregaddr); /* flush */
5411 return 0;
5412 }
5413
5414 /**
5415 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5416 *
5417 * This function calculates and returns the number of HBQs required to be
5418 * configured.
5419 **/
5420 int
5421 lpfc_sli_hbq_count(void)
5422 {
5423 return ARRAY_SIZE(lpfc_hbq_defs);
5424 }
5425
5426 /**
5427 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5428 *
5429 * This function adds the number of hbq entries in every HBQ to get
5430 * the total number of hbq entries required for the HBA and returns
5431 * the total count.
5432 **/
5433 static int
5434 lpfc_sli_hbq_entry_count(void)
5435 {
5436 int hbq_count = lpfc_sli_hbq_count();
5437 int count = 0;
5438 int i;
5439
5440 for (i = 0; i < hbq_count; ++i)
5441 count += lpfc_hbq_defs[i]->entry_count;
5442 return count;
5443 }
5444
5445 /**
5446 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5447 *
5448 * This function calculates amount of memory required for all hbq entries
5449 * to be configured and returns the total memory required.
5450 **/
5451 int
5452 lpfc_sli_hbq_size(void)
5453 {
5454 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5455 }
5456
5457 /**
5458 * lpfc_sli_hbq_setup - configure and initialize HBQs
5459 * @phba: Pointer to HBA context object.
5460 *
5461 * This function is called during the SLI initialization to configure
5462 * all the HBQs and post buffers to the HBQ. The caller is not
5463 * required to hold any locks. This function will return zero if successful
5464 * else it will return negative error code.
5465 **/
5466 static int
5467 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5468 {
5469 int hbq_count = lpfc_sli_hbq_count();
5470 LPFC_MBOXQ_t *pmb;
5471 MAILBOX_t *pmbox;
5472 uint32_t hbqno;
5473 uint32_t hbq_entry_index;
5474
5475 /* Get a Mailbox buffer to setup mailbox
5476 * commands for HBA initialization
5477 */
5478 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5479
5480 if (!pmb)
5481 return -ENOMEM;
5482
5483 pmbox = &pmb->u.mb;
5484
5485 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5486 phba->link_state = LPFC_INIT_MBX_CMDS;
5487 phba->hbq_in_use = 1;
5488
5489 hbq_entry_index = 0;
5490 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5491 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5492 phba->hbqs[hbqno].hbqPutIdx = 0;
5493 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5494 phba->hbqs[hbqno].entry_count =
5495 lpfc_hbq_defs[hbqno]->entry_count;
5496 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5497 hbq_entry_index, pmb);
5498 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5499
5500 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5501 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5502 mbxStatus <status>, ring <num> */
5503
5504 lpfc_printf_log(phba, KERN_ERR,
5505 LOG_SLI | LOG_VPORT,
5506 "1805 Adapter failed to init. "
5507 "Data: x%x x%x x%x\n",
5508 pmbox->mbxCommand,
5509 pmbox->mbxStatus, hbqno);
5510
5511 phba->link_state = LPFC_HBA_ERROR;
5512 mempool_free(pmb, phba->mbox_mem_pool);
5513 return -ENXIO;
5514 }
5515 }
5516 phba->hbq_count = hbq_count;
5517
5518 mempool_free(pmb, phba->mbox_mem_pool);
5519
5520 /* Initially populate or replenish the HBQs */
5521 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5522 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5523 return 0;
5524 }
5525
5526 /**
5527 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5528 * @phba: Pointer to HBA context object.
5529 *
5530 * This function is called during the SLI initialization to configure
5531 * the ELS HBQ and post receive buffers to it. The caller is not
5532 * required to hold any locks. This function will return zero if successful
5533 * else it will return negative error code.
5534 **/
5535 static int
5536 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5537 {
5538 phba->hbq_in_use = 1;
5539 /*
5540 * Specific case when MDS diagnostics are enabled and supported.
5541 * The receive buffer count is truncated to manage the incoming
5542 * traffic.
5543 */
5544 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5545 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5546 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5547 else
5548 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5549 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5550 phba->hbq_count = 1;
5551 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5552 /* Initially populate or replenish the HBQs */
5553 return 0;
5554 }
5555
5556 /**
5557 * lpfc_sli_config_port - Issue config port mailbox command
5558 * @phba: Pointer to HBA context object.
5559 * @sli_mode: sli mode - 2/3
5560 *
5561 * This function is called by the sli initialization code path
5562 * to issue config_port mailbox command. This function restarts the
5563 * HBA firmware and issues a config_port mailbox command to configure
5564 * the SLI interface in the sli mode specified by sli_mode
5565 * variable. The caller is not required to hold any locks.
5566 * The function returns 0 if successful, else returns negative error
5567 * code.
5568 **/
5569 int
5570 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5571 {
5572 LPFC_MBOXQ_t *pmb;
5573 uint32_t resetcount = 0, rc = 0, done = 0;
5574
5575 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5576 if (!pmb) {
5577 phba->link_state = LPFC_HBA_ERROR;
5578 return -ENOMEM;
5579 }
5580
5581 phba->sli_rev = sli_mode;
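/* Attempt CONFIG_PORT at most twice: an -ERESTART from the
 * pre-CONFIG_PORT step below triggers one more restart and retry.
 */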
5582 while (resetcount < 2 && !done) {
5583 spin_lock_irq(&phba->hbalock);
5584 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5585 spin_unlock_irq(&phba->hbalock);
5586 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5587 lpfc_sli_brdrestart(phba);
5588 rc = lpfc_sli_chipset_init(phba);
5589 if (rc)
5590 break;
5591
5592 spin_lock_irq(&phba->hbalock);
5593 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5594 spin_unlock_irq(&phba->hbalock);
5595 resetcount++;
5596
5597 /* Call pre CONFIG_PORT mailbox command initialization. A
5598 * value of 0 means the call was successful. Any other
5599 * nonzero value is a failure, but if ERESTART is returned,
5600 * the driver may reset the HBA and try again.
5601 */
5602 rc = lpfc_config_port_prep(phba);
5603 if (rc == -ERESTART) {
5604 phba->link_state = LPFC_LINK_UNKNOWN;
5605 continue;
5606 } else if (rc)
5607 break;
5608
5609 phba->link_state = LPFC_INIT_MBX_CMDS;
5610 lpfc_config_port(phba, pmb);
5611 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5612 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5613 LPFC_SLI3_HBQ_ENABLED |
5614 LPFC_SLI3_CRP_ENABLED |
5615 LPFC_SLI3_DSS_ENABLED);
5616 if (rc != MBX_SUCCESS) {
5617 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5618 "0442 Adapter failed to init, mbxCmd x%x "
5619 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5620 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5621 spin_lock_irq(&phba->hbalock);
5622 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5623 spin_unlock_irq(&phba->hbalock);
5624 rc = -ENXIO;
5625 } else {
5626 /* Allow asynchronous mailbox command to go through */
5627 spin_lock_irq(&phba->hbalock);
5628 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5629 spin_unlock_irq(&phba->hbalock);
5630 done = 1;
5631
5632 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5633 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5634 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5635 "3110 Port did not grant ASABT\n");
5636 }
5637 }
5638 if (!done) {
5639 rc = -EINVAL;
5640 goto do_prep_failed;
5641 }
5642 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5643 if (!pmb->u.mb.un.varCfgPort.cMA) {
5644 rc = -ENXIO;
5645 goto do_prep_failed;
5646 }
5647 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5648 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5649 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5650 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5651 phba->max_vpi : phba->max_vports;
5652
5653 } else
5654 phba->max_vpi = 0;
5655 if (pmb->u.mb.un.varCfgPort.gerbm)
5656 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5657 if (pmb->u.mb.un.varCfgPort.gcrp)
5658 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5659
5660 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5661 phba->port_gp = phba->mbox->us.s3_pgp.port;
5662
5663 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5664 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5665 phba->cfg_enable_bg = 0;
5666 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5667 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5668 "0443 Adapter did not grant "
5669 "BlockGuard\n");
5670 }
5671 }
5672 } else {
5673 phba->hbq_get = NULL;
5674 phba->port_gp = phba->mbox->us.s2.port;
5675 phba->max_vpi = 0;
5676 }
5677 do_prep_failed:
5678 mempool_free(pmb, phba->mbox_mem_pool);
5679 return rc;
5680 }
5681
5682
5683 /**
5684 * lpfc_sli_hba_setup - SLI initialization function
5685 * @phba: Pointer to HBA context object.
5686 *
5687 * This function is the main SLI initialization function. This function
5688 * is called by the HBA initialization code, HBA reset code and HBA
5689 * error attention handler code. Caller is not required to hold any
5690 * locks. This function issues config_port mailbox command to configure
5691 * the SLI, setup iocb rings and HBQ rings. In the end the function
5692 * calls the config_port_post function to issue init_link mailbox
5693 * command and to start the discovery. The function will return zero
5694 * if successful, else it will return negative error code.
5695 **/
5696 int
5697 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5698 {
5699 uint32_t rc;
5700 int i;
5701 int longs;
5702
5703 /* Enable ISR already does config_port because of config_msi mbx */
5704 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5705 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5706 if (rc)
5707 return -EIO;
5708 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5709 }
5710 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5711
5712 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5713 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5714 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5715 if (!rc) {
5716 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5717 "2709 This device supports "
5718 "Advanced Error Reporting (AER)\n");
5719 spin_lock_irq(&phba->hbalock);
5720 phba->hba_flag |= HBA_AER_ENABLED;
5721 spin_unlock_irq(&phba->hbalock);
5722 } else {
5723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5724 "2708 This device does not support "
5725 "Advanced Error Reporting (AER): %d\n",
5726 rc);
5727 phba->cfg_aer_support = 0;
5728 }
5729 }
5730
5731 if (phba->sli_rev == 3) {
5732 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5733 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5734 } else {
5735 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5736 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5737 phba->sli3_options = 0;
5738 }
5739
5740 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5741 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5742 phba->sli_rev, phba->max_vpi);
5743 rc = lpfc_sli_ring_map(phba);
5744
5745 if (rc)
5746 goto lpfc_sli_hba_setup_error;
5747
5748 /* Initialize VPIs. */
5749 if (phba->sli_rev == LPFC_SLI_REV3) {
5750 /*
5751 * The VPI bitmask and physical ID array are allocated
5752 * and initialized once only - at driver load. A port
5753 * reset doesn't need to reinitialize this memory.
5754 */
5755 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
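/* The bitmask must cover VPIs 0..max_vpi, i.e. max_vpi + 1 bits,
 * rounded up to a whole number of unsigned longs.
 */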
5756 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5757 phba->vpi_bmask = kcalloc(longs,
5758 sizeof(unsigned long),
5759 GFP_KERNEL);
5760 if (!phba->vpi_bmask) {
5761 rc = -ENOMEM;
5762 goto lpfc_sli_hba_setup_error;
5763 }
5764
5765 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5766 sizeof(uint16_t),
5767 GFP_KERNEL);
5768 if (!phba->vpi_ids) {
5769 kfree(phba->vpi_bmask);
5770 rc = -ENOMEM;
5771 goto lpfc_sli_hba_setup_error;
5772 }
5773 for (i = 0; i < phba->max_vpi; i++)
5774 phba->vpi_ids[i] = i;
5775 }
5776 }
5777
5778 /* Init HBQs */
5779 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5780 rc = lpfc_sli_hbq_setup(phba);
5781 if (rc)
5782 goto lpfc_sli_hba_setup_error;
5783 }
5784 spin_lock_irq(&phba->hbalock);
5785 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5786 spin_unlock_irq(&phba->hbalock);
5787
5788 rc = lpfc_config_port_post(phba);
5789 if (rc)
5790 goto lpfc_sli_hba_setup_error;
5791
5792 return rc;
5793
5794 lpfc_sli_hba_setup_error:
5795 phba->link_state = LPFC_HBA_ERROR;
5796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5797 "0445 Firmware initialization failed\n");
5798 return rc;
5799 }
5800
5801 /**
5802 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5803 * @phba: Pointer to HBA context object.
5804 *
5805 * This function issues a dump mailbox command to read config region
5806 * 23, parses the records in the region, and populates the driver
5807 * data structures.
5808 **/
5809 static int
5810 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5811 {
5812 LPFC_MBOXQ_t *mboxq;
5813 struct lpfc_dmabuf *mp;
5814 struct lpfc_mqe *mqe;
5815 uint32_t data_length;
5816 int rc;
5817
5818 /* Program the default value of vlan_id and fc_map */
5819 phba->valid_vlan = 0;
5820 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5821 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5822 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5823
5824 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5825 if (!mboxq)
5826 return -ENOMEM;
5827
5828 mqe = &mboxq->u.mqe;
5829 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5830 rc = -ENOMEM;
5831 goto out_free_mboxq;
5832 }
5833
5834 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5835 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5836
5837 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5838 "(%d):2571 Mailbox cmd x%x Status x%x "
5839 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5840 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5841 "CQ: x%x x%x x%x x%x\n",
5842 mboxq->vport ? mboxq->vport->vpi : 0,
5843 bf_get(lpfc_mqe_command, mqe),
5844 bf_get(lpfc_mqe_status, mqe),
5845 mqe->un.mb_words[0], mqe->un.mb_words[1],
5846 mqe->un.mb_words[2], mqe->un.mb_words[3],
5847 mqe->un.mb_words[4], mqe->un.mb_words[5],
5848 mqe->un.mb_words[6], mqe->un.mb_words[7],
5849 mqe->un.mb_words[8], mqe->un.mb_words[9],
5850 mqe->un.mb_words[10], mqe->un.mb_words[11],
5851 mqe->un.mb_words[12], mqe->un.mb_words[13],
5852 mqe->un.mb_words[14], mqe->un.mb_words[15],
5853 mqe->un.mb_words[16], mqe->un.mb_words[50],
5854 mboxq->mcqe.word0,
5855 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5856 mboxq->mcqe.trailer);
5857
5858 if (rc) {
5859 rc = -EIO;
5860 goto out_free_mboxq;
5861 }
5862 data_length = mqe->un.mb_words[5];
5863 if (data_length > DMP_RGN23_SIZE) {
5864 rc = -EIO;
5865 goto out_free_mboxq;
5866 }
5867
5868 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5869 rc = 0;
5870
5871 out_free_mboxq:
5872 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5873 return rc;
5874 }
5875
5876 /**
5877 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5878 * @phba: pointer to lpfc hba data structure.
5879 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5880 * @vpd: pointer to the memory to hold resulting port vpd data.
5881 * @vpd_size: On input, the number of bytes allocated to @vpd.
5882 * On output, the number of data bytes in @vpd.
5883 *
5884 * This routine executes a READ_REV SLI4 mailbox command. In
5885 * addition, this routine gets the port vpd data.
5886 *
5887 * Return codes
5888 * 0 - successful
5889 * -ENOMEM - could not allocate memory.
5890 **/
5891 static int
5892 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5893 uint8_t *vpd, uint32_t *vpd_size)
5894 {
5895 int rc = 0;
5896 uint32_t dma_size;
5897 struct lpfc_dmabuf *dmabuf;
5898 struct lpfc_mqe *mqe;
5899
5900 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5901 if (!dmabuf)
5902 return -ENOMEM;
5903
5904 /*
5905 * Get a DMA buffer for the vpd data resulting from the READ_REV
5906 * mailbox command.
5907 */
5908 dma_size = *vpd_size;
5909 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5910 &dmabuf->phys, GFP_KERNEL);
5911 if (!dmabuf->virt) {
5912 kfree(dmabuf);
5913 return -ENOMEM;
5914 }
5915
5916 /*
5917 * The SLI4 implementation of READ_REV conflicts at word1,
5918 * bits 31:16 and SLI4 adds vpd functionality not present
5919 * in SLI3. This code corrects the conflicts.
5920 */
5921 lpfc_read_rev(phba, mboxq);
5922 mqe = &mboxq->u.mqe;
5923 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5924 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5925 mqe->un.read_rev.word1 &= 0x0000FFFF;
5926 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5927 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5928
5929 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5930 if (rc) {
5931 dma_free_coherent(&phba->pcidev->dev, dma_size,
5932 dmabuf->virt, dmabuf->phys);
5933 kfree(dmabuf);
5934 return -EIO;
5935 }
5936
5937 /*
5938 * The available vpd length cannot be bigger than the
5939 * DMA buffer passed to the port. Catch the less-than
5940 * case and update the caller's size.
5941 */
5942 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5943 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5944
5945 memcpy(vpd, dmabuf->virt, *vpd_size);
5946
5947 dma_free_coherent(&phba->pcidev->dev, dma_size,
5948 dmabuf->virt, dmabuf->phys);
5949 kfree(dmabuf);
5950 return 0;
5951 }
5952
5953 /**
5954 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5955 * @phba: pointer to lpfc hba data structure.
5956 *
5957 * This routine retrieves the controller attributes of the SLI4 device
5958 * this PCI function is attached to.
5959 *
5960 * Return codes
5961 * 0 - successful
5962 * otherwise - failed to retrieve controller attributes
5963 **/
5964 static int
5965 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5966 {
5967 LPFC_MBOXQ_t *mboxq;
5968 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5969 struct lpfc_controller_attribute *cntl_attr;
5970 void *virtaddr = NULL;
5971 uint32_t alloclen, reqlen;
5972 uint32_t shdr_status, shdr_add_status;
5973 union lpfc_sli4_cfg_shdr *shdr;
5974 int rc;
5975
5976 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5977 if (!mboxq)
5978 return -ENOMEM;
5979
5980 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5981 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5982 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5983 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5984 LPFC_SLI4_MBX_NEMBED);
5985
5986 if (alloclen < reqlen) {
5987 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5988 "3084 Allocated DMA memory size (%d) is "
5989 "less than the requested DMA memory size "
5990 "(%d)\n", alloclen, reqlen);
5991 rc = -ENOMEM;
5992 goto out_free_mboxq;
5993 }
5994 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5995 virtaddr = mboxq->sge_array->addr[0];
5996 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5997 shdr = &mbx_cntl_attr->cfg_shdr;
5998 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5999 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6000 if (shdr_status || shdr_add_status || rc) {
6001 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6002 "3085 Mailbox x%x (x%x/x%x) failed, "
6003 "rc:x%x, status:x%x, add_status:x%x\n",
6004 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6005 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6006 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6007 rc, shdr_status, shdr_add_status);
6008 rc = -ENXIO;
6009 goto out_free_mboxq;
6010 }
6011
6012 cntl_attr = &mbx_cntl_attr->cntl_attr;
6013 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6014 phba->sli4_hba.lnk_info.lnk_tp =
6015 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6016 phba->sli4_hba.lnk_info.lnk_no =
6017 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6018 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6019 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6020
6021 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6022 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6023 sizeof(phba->BIOSVersion));
6024
6025 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6026 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6027 "flash_id: x%02x, asic_rev: x%02x\n",
6028 phba->sli4_hba.lnk_info.lnk_tp,
6029 phba->sli4_hba.lnk_info.lnk_no,
6030 phba->BIOSVersion, phba->sli4_hba.flash_id,
6031 phba->sli4_hba.asic_rev);
6032 out_free_mboxq:
6033 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6034 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6035 else
6036 mempool_free(mboxq, phba->mbox_mem_pool);
6037 return rc;
6038 }
6039
6040 /**
6041 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6042 * @phba: pointer to lpfc hba data structure.
6043 *
6044 * This routine retrieves the SLI4 device physical port name that this
6045 * PCI function is attached to.
6046 *
6047 * Return codes
6048 * 0 - successful
6049 * otherwise - failed to retrieve physical port name
6050 **/
6051 static int
6052 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6053 {
6054 LPFC_MBOXQ_t *mboxq;
6055 struct lpfc_mbx_get_port_name *get_port_name;
6056 uint32_t shdr_status, shdr_add_status;
6057 union lpfc_sli4_cfg_shdr *shdr;
6058 char cport_name = 0;
6059 int rc;
6060
6061 /* We assume nothing at this point */
6062 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6063 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6064
6065 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6066 if (!mboxq)
6067 return -ENOMEM;
6068 /* obtain link type and link number via READ_CONFIG */
6069 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6070 lpfc_sli4_read_config(phba);
6071
6072 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6073 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6074
6075 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6076 goto retrieve_ppname;
6077
6078 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6079 rc = lpfc_sli4_get_ctl_attr(phba);
6080 if (rc)
6081 goto out_free_mboxq;
6082
6083 retrieve_ppname:
6084 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6085 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6086 sizeof(struct lpfc_mbx_get_port_name) -
6087 sizeof(struct lpfc_sli4_cfg_mhdr),
6088 LPFC_SLI4_MBX_EMBED);
6089 get_port_name = &mboxq->u.mqe.un.get_port_name;
6090 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6091 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6092 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6093 phba->sli4_hba.lnk_info.lnk_tp);
6094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6095 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6096 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6097 if (shdr_status || shdr_add_status || rc) {
6098 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6099 "3087 Mailbox x%x (x%x/x%x) failed: "
6100 "rc:x%x, status:x%x, add_status:x%x\n",
6101 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6102 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6103 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6104 rc, shdr_status, shdr_add_status);
6105 rc = -ENXIO;
6106 goto out_free_mboxq;
6107 }
6108 switch (phba->sli4_hba.lnk_info.lnk_no) {
6109 case LPFC_LINK_NUMBER_0:
6110 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6111 &get_port_name->u.response);
6112 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6113 break;
6114 case LPFC_LINK_NUMBER_1:
6115 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6116 &get_port_name->u.response);
6117 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6118 break;
6119 case LPFC_LINK_NUMBER_2:
6120 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6121 &get_port_name->u.response);
6122 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6123 break;
6124 case LPFC_LINK_NUMBER_3:
6125 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6126 &get_port_name->u.response);
6127 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6128 break;
6129 default:
6130 break;
6131 }
6132
6133 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6134 phba->Port[0] = cport_name;
6135 phba->Port[1] = '\0';
6136 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6137 "3091 SLI get port name: %s\n", phba->Port);
6138 }
6139
6140 out_free_mboxq:
6141 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6142 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6143 else
6144 mempool_free(mboxq, phba->mbox_mem_pool);
6145 return rc;
6146 }
6147
6148 /**
6149 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6150 * @phba: pointer to lpfc hba data structure.
6151 *
6152 * This routine is called to explicitly arm the SLI4 device's completion and
6153 * event queues.
6154 **/
6155 static void
6156 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6157 {
6158 int qidx;
6159 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6160 struct lpfc_sli4_hdw_queue *qp;
6161 struct lpfc_queue *eq;
6162
6163 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6164 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6165 if (sli4_hba->nvmels_cq)
6166 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6167 LPFC_QUEUE_REARM);
6168
6169 if (sli4_hba->hdwq) {
6170 /* Loop thru all Hardware Queues */
6171 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6172 qp = &sli4_hba->hdwq[qidx];
6173 /* ARM the corresponding CQ */
6174 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6175 LPFC_QUEUE_REARM);
6176 }
6177
6178 /* Loop thru all IRQ vectors */
6179 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6180 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6181 /* ARM the corresponding EQ */
6182 sli4_hba->sli4_write_eq_db(phba, eq,
6183 0, LPFC_QUEUE_REARM);
6184 }
6185 }
6186
6187 if (phba->nvmet_support) {
6188 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6189 sli4_hba->sli4_write_cq_db(phba,
6190 sli4_hba->nvmet_cqset[qidx], 0,
6191 LPFC_QUEUE_REARM);
6192 }
6193 }
6194 }
6195
6196 /**
6197 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6198 * @phba: Pointer to HBA context object.
6199 * @type: The resource extent type.
6200 * @extnt_count: buffer to hold port available extent count.
6201 * @extnt_size: buffer to hold element count per extent.
6202 *
6203 * This function calls the port and retrieves the number of available
6204 * extents and their size for a particular extent type.
6205 *
6206 * Returns: 0 if successful. Nonzero otherwise.
6207 **/
6208 int
6209 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6210 uint16_t *extnt_count, uint16_t *extnt_size)
6211 {
6212 int rc = 0;
6213 uint32_t length;
6214 uint32_t mbox_tmo;
6215 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6216 LPFC_MBOXQ_t *mbox;
6217
6218 *extnt_count = 0;
6219 *extnt_size = 0;
6220
6221 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6222 if (!mbox)
6223 return -ENOMEM;
6224
6225 /* Find out how many extents are available for this resource type */
6226 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6227 sizeof(struct lpfc_sli4_cfg_mhdr));
6228 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6229 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6230 length, LPFC_SLI4_MBX_EMBED);
6231
6232 /* Send an extents count of 0 - the GET doesn't use it. */
6233 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6234 LPFC_SLI4_MBX_EMBED);
6235 if (unlikely(rc)) {
6236 rc = -EIO;
6237 goto err_exit;
6238 }
6239
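/* Before interrupts are enabled the mailbox must be polled; once
 * enabled, sleep-wait with the command-specific timeout instead.
 */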
6240 if (!phba->sli4_hba.intr_enable)
6241 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6242 else {
6243 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6244 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6245 }
6246 if (unlikely(rc)) {
6247 rc = -EIO;
6248 goto err_exit;
6249 }
6250
6251 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6252 if (bf_get(lpfc_mbox_hdr_status,
6253 &rsrc_info->header.cfg_shdr.response)) {
6254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6255 "2930 Failed to get resource extents "
6256 "Status 0x%x Add'l Status 0x%x\n",
6257 bf_get(lpfc_mbox_hdr_status,
6258 &rsrc_info->header.cfg_shdr.response),
6259 bf_get(lpfc_mbox_hdr_add_status,
6260 &rsrc_info->header.cfg_shdr.response));
6261 rc = -EIO;
6262 goto err_exit;
6263 }
6264
6265 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6266 &rsrc_info->u.rsp);
6267 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6268 &rsrc_info->u.rsp);
6269
6270 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6271 "3162 Retrieved extents type-%d from port: count:%d, "
6272 "size:%d\n", type, *extnt_count, *extnt_size);
6273
6274 err_exit:
6275 mempool_free(mbox, phba->mbox_mem_pool);
6276 return rc;
6277 }
6278
6279 /**
6280 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6281 * @phba: Pointer to HBA context object.
6282 * @type: The extent type to check.
6283 *
6284 * This function reads the current available extents from the port and checks
6285 * if the extent count or extent size has changed since the last access.
6286 * Callers use this routine after a port reset to determine if there is an
6287 * extent reprovisioning requirement.
6288 *
6289 * Returns:
6290 * -Error: error indicates problem.
6291 * 1: Extent count or size has changed.
6292 * 0: No changes.
6293 **/
6294 static int
6295 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6296 {
6297 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6298 uint16_t size_diff, rsrc_ext_size;
6299 int rc = 0;
6300 struct lpfc_rsrc_blks *rsrc_entry;
6301 struct list_head *rsrc_blk_list = NULL;
6302
6303 size_diff = 0;
6304 curr_ext_cnt = 0;
6305 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6306 &rsrc_ext_cnt,
6307 &rsrc_ext_size);
6308 if (unlikely(rc))
6309 return -EIO;
6310
6311 switch (type) {
6312 case LPFC_RSC_TYPE_FCOE_RPI:
6313 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6314 break;
6315 case LPFC_RSC_TYPE_FCOE_VPI:
6316 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6317 break;
6318 case LPFC_RSC_TYPE_FCOE_XRI:
6319 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6320 break;
6321 case LPFC_RSC_TYPE_FCOE_VFI:
6322 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6323 break;
6324 default:
6325 break;
6326 }
6327
6328 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6329 curr_ext_cnt++;
6330 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6331 size_diff++;
6332 }
6333
6334 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6335 rc = 1;
6336
6337 return rc;
6338 }
6339
6340 /**
6341 * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port
6342 * @phba: Pointer to HBA context object.
6343 * @extnt_cnt: number of extents to allocate.
6344 * @type: the extent type (rpi, xri, vfi, vpi).
6345 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6346 * @mbox: pointer to the caller's allocated mailbox structure.
6347 *
6348 * This function executes the extent allocation request. It also
6349 * sizes the mailbox memory (embedded or non-embedded) needed to hold
6350 * the allocated extents. It is the caller's responsibility to evaluate
6351 * the response.
6352 *
6353 * Returns:
6354 * -Error: Error value describes the condition found.
6355 * 0: if successful
6356 **/
6357 static int
6358 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6359 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6360 {
6361 int rc = 0;
6362 uint32_t req_len;
6363 uint32_t emb_len;
6364 uint32_t alloc_len, mbox_tmo;
6365
6366 /* Calculate the total requested length of the dma memory */
6367 req_len = extnt_cnt * sizeof(uint16_t);
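/* Each allocated extent is reported back as a 16-bit base id. */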
6368
6369 /*
6370 * Calculate the size of an embedded mailbox. The uint32_t
6371 * accounts for the extents-specific word.
6372 */
6373 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6374 sizeof(uint32_t);
6375
6376 /*
6377 * Presume the allocation and response will fit into an embedded
6378 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6379 */
6380 *emb = LPFC_SLI4_MBX_EMBED;
6381 if (req_len > emb_len) {
6382 req_len = extnt_cnt * sizeof(uint16_t) +
6383 sizeof(union lpfc_sli4_cfg_shdr) +
6384 sizeof(uint32_t);
6385 *emb = LPFC_SLI4_MBX_NEMBED;
6386 }
6387
6388 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6389 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6390 req_len, *emb);
6391 if (alloc_len < req_len) {
6392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6393 "2982 Allocated DMA memory size (x%x) is "
6394 "less than the requested DMA memory "
6395 "size (x%x)\n", alloc_len, req_len);
6396 return -ENOMEM;
6397 }
6398 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6399 if (unlikely(rc))
6400 return -EIO;
6401
6402 if (!phba->sli4_hba.intr_enable)
6403 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6404 else {
6405 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6406 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6407 }
6408
6409 if (unlikely(rc))
6410 rc = -EIO;
6411 return rc;
6412 }
6413
6414 /**
6415 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6416 * @phba: Pointer to HBA context object.
6417 * @type: The resource extent type to allocate.
6418 *
6419 * This function allocates the number of elements for the specified
6420 * resource type.
6421 **/
6422 static int
6423 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6424 {
6425 bool emb = false;
6426 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6427 uint16_t rsrc_id, rsrc_start, j, k;
6428 uint16_t *ids;
6429 int i, rc;
6430 unsigned long longs;
6431 unsigned long *bmask;
6432 struct lpfc_rsrc_blks *rsrc_blks;
6433 LPFC_MBOXQ_t *mbox;
6434 uint32_t length;
6435 struct lpfc_id_range *id_array = NULL;
6436 void *virtaddr = NULL;
6437 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6438 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6439 struct list_head *ext_blk_list;
6440
6441 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6442 &rsrc_cnt,
6443 &rsrc_size);
6444 if (unlikely(rc))
6445 return -EIO;
6446
6447 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6448 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6449 "3009 No available Resource Extents "
6450 "for resource type 0x%x: Count: 0x%x, "
6451 "Size 0x%x\n", type, rsrc_cnt,
6452 rsrc_size);
6453 return -ENOMEM;
6454 }
6455
6456 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6457 "2903 Post resource extents type-0x%x: "
6458 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6459
6460 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6461 if (!mbox)
6462 return -ENOMEM;
6463
6464 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6465 if (unlikely(rc)) {
6466 rc = -EIO;
6467 goto err_exit;
6468 }
6469
6470 /*
6471 * Figure out where the response is located, then get local pointers
6472 * to the response data. The port does not guarantee to honor the
6473 * full requested extent count, so update the local variable with the
6474 * allocated count returned by the port.
6475 */
6476 if (emb == LPFC_SLI4_MBX_EMBED) {
6477 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6478 id_array = &rsrc_ext->u.rsp.id[0];
6479 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6480 } else {
6481 virtaddr = mbox->sge_array->addr[0];
6482 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6483 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6484 id_array = &n_rsrc->id;
6485 }
6486
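/* Size the id bitmask: one bit per resource id (count * size),
 * rounded up to whole unsigned longs.
 */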
6487 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6488 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6489
6490 /*
6491 * Based on the resource size and count, correct the base and max
6492 * resource values.
6493 */
6494 length = sizeof(struct lpfc_rsrc_blks);
6495 switch (type) {
6496 case LPFC_RSC_TYPE_FCOE_RPI:
6497 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6498 sizeof(unsigned long),
6499 GFP_KERNEL);
6500 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6501 rc = -ENOMEM;
6502 goto err_exit;
6503 }
6504 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6505 sizeof(uint16_t),
6506 GFP_KERNEL);
6507 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6508 kfree(phba->sli4_hba.rpi_bmask);
6509 rc = -ENOMEM;
6510 goto err_exit;
6511 }
6512
6513 /*
6514 * The next_rpi was initialized with the maximum available
6515 * count but the port may allocate a smaller number. Catch
6516 * that case and update the next_rpi.
6517 */
6518 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6519
6520 /* Initialize local ptrs for common extent processing later. */
6521 bmask = phba->sli4_hba.rpi_bmask;
6522 ids = phba->sli4_hba.rpi_ids;
6523 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6524 break;
6525 case LPFC_RSC_TYPE_FCOE_VPI:
6526 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6527 GFP_KERNEL);
6528 if (unlikely(!phba->vpi_bmask)) {
6529 rc = -ENOMEM;
6530 goto err_exit;
6531 }
6532 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6533 GFP_KERNEL);
6534 if (unlikely(!phba->vpi_ids)) {
6535 kfree(phba->vpi_bmask);
6536 rc = -ENOMEM;
6537 goto err_exit;
6538 }
6539
6540 /* Initialize local ptrs for common extent processing later. */
6541 bmask = phba->vpi_bmask;
6542 ids = phba->vpi_ids;
6543 ext_blk_list = &phba->lpfc_vpi_blk_list;
6544 break;
6545 case LPFC_RSC_TYPE_FCOE_XRI:
6546 phba->sli4_hba.xri_bmask = kcalloc(longs,
6547 sizeof(unsigned long),
6548 GFP_KERNEL);
6549 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6550 rc = -ENOMEM;
6551 goto err_exit;
6552 }
6553 phba->sli4_hba.max_cfg_param.xri_used = 0;
6554 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6555 sizeof(uint16_t),
6556 GFP_KERNEL);
6557 if (unlikely(!phba->sli4_hba.xri_ids)) {
6558 kfree(phba->sli4_hba.xri_bmask);
6559 rc = -ENOMEM;
6560 goto err_exit;
6561 }
6562
6563 /* Initialize local ptrs for common extent processing later. */
6564 bmask = phba->sli4_hba.xri_bmask;
6565 ids = phba->sli4_hba.xri_ids;
6566 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6567 break;
6568 case LPFC_RSC_TYPE_FCOE_VFI:
6569 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6570 sizeof(unsigned long),
6571 GFP_KERNEL);
6572 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6573 rc = -ENOMEM;
6574 goto err_exit;
6575 }
6576 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6577 sizeof(uint16_t),
6578 GFP_KERNEL);
6579 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6580 kfree(phba->sli4_hba.vfi_bmask);
6581 rc = -ENOMEM;
6582 goto err_exit;
6583 }
6584
6585 /* Initialize local ptrs for common extent processing later. */
6586 bmask = phba->sli4_hba.vfi_bmask;
6587 ids = phba->sli4_hba.vfi_ids;
6588 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6589 break;
6590 default:
6591 /* Unsupported extent type. Fail the call. */
rc = -EIO;
6592 id_array = NULL;
6593 bmask = NULL;
6594 ids = NULL;
6595 ext_blk_list = NULL;
6596 goto err_exit;
6597 }
6598
6599 /*
6600 * Complete initializing the extent configuration with the
6601 * allocated ids assigned to this function. The bitmask serves
6602 * as an index into the array and manages the available ids. The
6603 * array just stores the ids communicated to the port via the wqes.
6604 */
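/* Each lpfc_id_range word packs two 16-bit extent base ids, so k
 * advances to the next word only after every second extent (i).
 */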
6605 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6606 if ((i % 2) == 0)
6607 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6608 &id_array[k]);
6609 else
6610 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6611 &id_array[k]);
6612
6613 rsrc_blks = kzalloc(length, GFP_KERNEL);
6614 if (unlikely(!rsrc_blks)) {
6615 rc = -ENOMEM;
6616 kfree(bmask);
6617 kfree(ids);
6618 goto err_exit;
6619 }
6620 rsrc_blks->rsrc_start = rsrc_id;
6621 rsrc_blks->rsrc_size = rsrc_size;
6622 list_add_tail(&rsrc_blks->list, ext_blk_list);
6623 rsrc_start = rsrc_id;
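/* The first XRI extent also fixes where I/O XRIs begin: just past
 * the first lpfc_sli4_get_iocb_cnt() ids, which are reserved for
 * non-I/O (e.g. ELS) use.
 */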
6624 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6625 phba->sli4_hba.io_xri_start = rsrc_start +
6626 lpfc_sli4_get_iocb_cnt(phba);
6627 }
6628
6629 while (rsrc_id < (rsrc_start + rsrc_size)) {
6630 ids[j] = rsrc_id;
6631 rsrc_id++;
6632 j++;
6633 }
6634 /* Entire word processed. Get next word.*/
6635 if ((i % 2) == 1)
6636 k++;
6637 }
6638 err_exit:
6639 lpfc_sli4_mbox_cmd_free(phba, mbox);
6640 return rc;
6641 }
6642
6643
6644
6645 /**
6646 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6647 * @phba: Pointer to HBA context object.
6648 * @type: the extent's type.
6649 *
6650 * This function deallocates all extents of a particular resource type.
6651 * SLI4 does not allow for deallocating a particular extent range. It
6652 * is the caller's responsibility to release all kernel memory resources.
6653 **/
6654 static int
6655 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6656 {
6657 int rc;
6658 uint32_t length, mbox_tmo = 0;
6659 LPFC_MBOXQ_t *mbox;
6660 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6661 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6662
6663 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6664 if (!mbox)
6665 return -ENOMEM;
6666
6667 /*
6668 * This function sends an embedded mailbox because it only sends the
6669 * resource type. All extents of this type are released by the
6670 * port.
6671 */
6672 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6673 sizeof(struct lpfc_sli4_cfg_mhdr));
6674 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6675 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6676 length, LPFC_SLI4_MBX_EMBED);
6677
6678 /* Send an extents count of 0 - the dealloc doesn't use it. */
6679 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6680 LPFC_SLI4_MBX_EMBED);
6681 if (unlikely(rc)) {
6682 rc = -EIO;
6683 goto out_free_mbox;
6684 }
6685 if (!phba->sli4_hba.intr_enable)
6686 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6687 else {
6688 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6689 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6690 }
6691 if (unlikely(rc)) {
6692 rc = -EIO;
6693 goto out_free_mbox;
6694 }
6695
6696 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6697 if (bf_get(lpfc_mbox_hdr_status,
6698 &dealloc_rsrc->header.cfg_shdr.response)) {
6699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6700 "2919 Failed to release resource extents "
6701 "for type %d - Status 0x%x Add'l Status 0x%x. "
6702 "Resource memory not released.\n",
6703 type,
6704 bf_get(lpfc_mbox_hdr_status,
6705 &dealloc_rsrc->header.cfg_shdr.response),
6706 bf_get(lpfc_mbox_hdr_add_status,
6707 &dealloc_rsrc->header.cfg_shdr.response));
6708 rc = -EIO;
6709 goto out_free_mbox;
6710 }
6711
6712 /* Release kernel memory resources for the specific type. */
6713 switch (type) {
6714 case LPFC_RSC_TYPE_FCOE_VPI:
6715 kfree(phba->vpi_bmask);
6716 kfree(phba->vpi_ids);
6717 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6718 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6719 &phba->lpfc_vpi_blk_list, list) {
6720 list_del_init(&rsrc_blk->list);
6721 kfree(rsrc_blk);
6722 }
6723 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6724 break;
6725 case LPFC_RSC_TYPE_FCOE_XRI:
6726 kfree(phba->sli4_hba.xri_bmask);
6727 kfree(phba->sli4_hba.xri_ids);
6728 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6729 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6730 list_del_init(&rsrc_blk->list);
6731 kfree(rsrc_blk);
6732 }
6733 break;
6734 case LPFC_RSC_TYPE_FCOE_VFI:
6735 kfree(phba->sli4_hba.vfi_bmask);
6736 kfree(phba->sli4_hba.vfi_ids);
6737 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6738 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6739 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6740 list_del_init(&rsrc_blk->list);
6741 kfree(rsrc_blk);
6742 }
6743 break;
6744 case LPFC_RSC_TYPE_FCOE_RPI:
6745 /* RPI bitmask and physical id array are cleaned up earlier. */
6746 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6747 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6748 list_del_init(&rsrc_blk->list);
6749 kfree(rsrc_blk);
6750 }
6751 break;
6752 default:
6753 break;
6754 }
6755
6756 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6757
6758 out_free_mbox:
6759 mempool_free(mbox, phba->mbox_mem_pool);
6760 return rc;
6761 }
6762
6763 static void
6764 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6765 uint32_t feature)
6766 {
6767 uint32_t len;
6768 u32 sig_freq = 0;
6769
6770 len = sizeof(struct lpfc_mbx_set_feature) -
6771 sizeof(struct lpfc_sli4_cfg_mhdr);
6772 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6773 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6774 LPFC_SLI4_MBX_EMBED);
6775
6776 switch (feature) {
6777 case LPFC_SET_UE_RECOVERY:
6778 bf_set(lpfc_mbx_set_feature_UER,
6779 &mbox->u.mqe.un.set_feature, 1);
6780 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6781 mbox->u.mqe.un.set_feature.param_len = 8;
6782 break;
6783 case LPFC_SET_MDS_DIAGS:
6784 bf_set(lpfc_mbx_set_feature_mds,
6785 &mbox->u.mqe.un.set_feature, 1);
6786 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6787 &mbox->u.mqe.un.set_feature, 1);
6788 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6789 mbox->u.mqe.un.set_feature.param_len = 8;
6790 break;
6791 case LPFC_SET_CGN_SIGNAL:
6792 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6793 sig_freq = 0;
6794 else
6795 sig_freq = phba->cgn_sig_freq;
6796
6797 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6798 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6799 &mbox->u.mqe.un.set_feature, sig_freq);
6800 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6801 &mbox->u.mqe.un.set_feature, sig_freq);
6802 }
6803
6804 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6805 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6806 &mbox->u.mqe.un.set_feature, sig_freq);
6807
6808 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6809 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6810 sig_freq = 0;
6811 else
6812 sig_freq = lpfc_acqe_cgn_frequency;
6813
6814 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6815 &mbox->u.mqe.un.set_feature, sig_freq);
6816
6817 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6818 mbox->u.mqe.un.set_feature.param_len = 12;
6819 break;
6820 case LPFC_SET_DUAL_DUMP:
6821 bf_set(lpfc_mbx_set_feature_dd,
6822 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6823 bf_set(lpfc_mbx_set_feature_ddquery,
6824 &mbox->u.mqe.un.set_feature, 0);
6825 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6826 mbox->u.mqe.un.set_feature.param_len = 4;
6827 break;
6828 case LPFC_SET_ENABLE_MI:
6829 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6830 mbox->u.mqe.un.set_feature.param_len = 4;
6831 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6832 phba->pport->cfg_lun_queue_depth);
6833 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6834 phba->sli4_hba.pc_sli4_params.mi_ver);
6835 break;
6836 case LPFC_SET_LD_SIGNAL:
6837 mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6838 mbox->u.mqe.un.set_feature.param_len = 16;
6839 bf_set(lpfc_mbx_set_feature_lds_qry,
6840 &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6841 break;
6842 case LPFC_SET_ENABLE_CMF:
6843 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6844 mbox->u.mqe.un.set_feature.param_len = 4;
6845 bf_set(lpfc_mbx_set_feature_cmf,
6846 &mbox->u.mqe.un.set_feature, 1);
6847 break;
6848 }
6849 return;
6850 }
6851
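/*
 * Editor's note: a minimal usage sketch for lpfc_set_features(); the mailbox
 * handling shown here mirrors lpfc_read_lds_params() later in this file and
 * is an assumption about the call site, not a quote of one:
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	mempool_free(mbox, phba->mbox_mem_pool);
 *
 * lpfc_set_features() only builds the SET_FEATURES MQE in place; issuing the
 * mailbox and checking its status remain the caller's job.
 */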
6852 /**
6853 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6854 * @phba: Pointer to HBA context object.
6855 *
6856 * Disable FW logging into host memory on the adapter. To
6857 * be done before reading logs from the host memory.
6858 **/
6859 void
6860 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6861 {
6862 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6863
6864 spin_lock_irq(&phba->hbalock);
6865 ras_fwlog->state = INACTIVE;
6866 spin_unlock_irq(&phba->hbalock);
6867
6868 /* Disable FW logging to host memory */
6869 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6870 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6871
6872 /* Wait 10ms for firmware to stop using DMA buffer */
6873 usleep_range(10 * 1000, 20 * 1000);
6874 }
6875
6876 /**
6877 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6878 * @phba: Pointer to HBA context object.
6879 *
6880 * This function is called to free memory allocated for RAS FW logging
6881 * support in the driver.
6882 **/
6883 void
6884 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6885 {
6886 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6887 struct lpfc_dmabuf *dmabuf, *next;
6888
6889 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6890 list_for_each_entry_safe(dmabuf, next,
6891 &ras_fwlog->fwlog_buff_list,
6892 list) {
6893 list_del(&dmabuf->list);
6894 dma_free_coherent(&phba->pcidev->dev,
6895 LPFC_RAS_MAX_ENTRY_SIZE,
6896 dmabuf->virt, dmabuf->phys);
6897 kfree(dmabuf);
6898 }
6899 }
6900
6901 if (ras_fwlog->lwpd.virt) {
6902 dma_free_coherent(&phba->pcidev->dev,
6903 sizeof(uint32_t) * 2,
6904 ras_fwlog->lwpd.virt,
6905 ras_fwlog->lwpd.phys);
6906 ras_fwlog->lwpd.virt = NULL;
6907 }
6908
6909 spin_lock_irq(&phba->hbalock);
6910 ras_fwlog->state = INACTIVE;
6911 spin_unlock_irq(&phba->hbalock);
6912 }
6913
6914 /**
6915 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6916 * @phba: Pointer to HBA context object.
6917 * @fwlog_buff_count: Count of buffers to be created.
6918 *
6919 * This routine allocates DMA memory for the Log Write Position Data [LWPD]
6920 * and for the log buffers posted to the adapter for FW log updates.
6921 * Buffer count is calculated based on module param ras_fwlog_buffsize
6922 * Size of each buffer posted to FW is 64K.
6923 **/
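/*
 * Editor's note: a worked example, assuming LPFC_RAS_MIN_BUFF_POST_SIZE is
 * 256KB and LPFC_RAS_MAX_ENTRY_SIZE is 64KB (see lpfc_hw4.h for the
 * authoritative values): with cfg_ras_fwlog_buffsize = 2, the caller computes
 * fwlog_buffsize = 512KB and fwlog_entry_count = 8, so eight 64KB DMA buffers
 * are allocated and later posted to the port.
 */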
6924
6925 static int
6926 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6927 uint32_t fwlog_buff_count)
6928 {
6929 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6930 struct lpfc_dmabuf *dmabuf;
6931 int rc = 0, i = 0;
6932
6933 /* Initialize List */
6934 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6935
6936 /* Allocate memory for the LWPD */
6937 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6938 sizeof(uint32_t) * 2,
6939 &ras_fwlog->lwpd.phys,
6940 GFP_KERNEL);
6941 if (!ras_fwlog->lwpd.virt) {
6942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6943 "6185 LWPD Memory Alloc Failed\n");
6944
6945 return -ENOMEM;
6946 }
6947
6948 ras_fwlog->fw_buffcount = fwlog_buff_count;
6949 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6950 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6951 GFP_KERNEL);
6952 if (!dmabuf) {
6953 rc = -ENOMEM;
6954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6955 "6186 Memory Alloc failed FW logging");
6956 goto free_mem;
6957 }
6958
6959 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6960 LPFC_RAS_MAX_ENTRY_SIZE,
6961 &dmabuf->phys, GFP_KERNEL);
6962 if (!dmabuf->virt) {
6963 kfree(dmabuf);
6964 rc = -ENOMEM;
6965 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6966 "6187 DMA Alloc Failed FW logging");
6967 goto free_mem;
6968 }
6969 dmabuf->buffer_tag = i;
6970 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6971 }
6972
6973 free_mem:
6974 if (rc)
6975 lpfc_sli4_ras_dma_free(phba);
6976
6977 return rc;
6978 }
6979
6980 /**
6981 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6982 * @phba: pointer to lpfc hba data structure.
6983 * @pmb: pointer to the driver internal queue element for mailbox command.
6984 *
6985 * Completion handler for driver's RAS MBX command to the device.
6986 **/
6987 static void
6988 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6989 {
6990 MAILBOX_t *mb;
6991 union lpfc_sli4_cfg_shdr *shdr;
6992 uint32_t shdr_status, shdr_add_status;
6993 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6994
6995 mb = &pmb->u.mb;
6996
6997 shdr = (union lpfc_sli4_cfg_shdr *)
6998 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6999 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7000 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7001
7002 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
7003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7004 "6188 FW LOG mailbox "
7005 "completed with status x%x add_status x%x,"
7006 " mbx status x%x\n",
7007 shdr_status, shdr_add_status, mb->mbxStatus);
7008
7009 ras_fwlog->ras_hwsupport = false;
7010 goto disable_ras;
7011 }
7012
7013 spin_lock_irq(&phba->hbalock);
7014 ras_fwlog->state = ACTIVE;
7015 spin_unlock_irq(&phba->hbalock);
7016 mempool_free(pmb, phba->mbox_mem_pool);
7017
7018 return;
7019
7020 disable_ras:
7021 /* Free RAS DMA memory */
7022 lpfc_sli4_ras_dma_free(phba);
7023 mempool_free(pmb, phba->mbox_mem_pool);
7024 }
7025
7026 /**
7027 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7028 * @phba: pointer to lpfc hba data structure.
7029 * @fwlog_level: Logging verbosity level.
7030 * @fwlog_enable: Enable/Disable logging.
7031 *
7032 * Initialize memory and post mailbox command to enable FW logging in host
7033 * memory.
7034 **/
7035 int
7036 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7037 uint32_t fwlog_level,
7038 uint32_t fwlog_enable)
7039 {
7040 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7041 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7042 struct lpfc_dmabuf *dmabuf;
7043 LPFC_MBOXQ_t *mbox;
7044 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7045 int rc = 0;
7046
7047 spin_lock_irq(&phba->hbalock);
7048 ras_fwlog->state = INACTIVE;
7049 spin_unlock_irq(&phba->hbalock);
7050
7051 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7052 phba->cfg_ras_fwlog_buffsize);
7053 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7054
7055 /*
7056 * If re-enabling FW logging support use earlier allocated
7057 * DMA buffers while posting MBX command.
7058 **/
7059 if (!ras_fwlog->lwpd.virt) {
7060 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7061 if (rc) {
7062 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7063 "6189 FW Log Memory Allocation Failed");
7064 return rc;
7065 }
7066 }
7067
7068 /* Setup Mailbox command */
7069 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7070 if (!mbox) {
7071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7072 "6190 RAS MBX Alloc Failed");
7073 rc = -ENOMEM;
7074 goto mem_free;
7075 }
7076
7077 ras_fwlog->fw_loglevel = fwlog_level;
7078 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7079 sizeof(struct lpfc_sli4_cfg_mhdr));
7080
7081 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7082 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7083 len, LPFC_SLI4_MBX_EMBED);
7084
7085 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7086 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7087 fwlog_enable);
7088 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7089 ras_fwlog->fw_loglevel);
7090 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7091 ras_fwlog->fw_buffcount);
7092 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7093 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7094
7095 /* Update DMA buffer address */
7096 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7097 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7098
7099 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7100 putPaddrLow(dmabuf->phys);
7101
7102 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7103 putPaddrHigh(dmabuf->phys);
7104 }
7105
7106 /* Update LWPD address */
7107 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7108 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7109
7110 spin_lock_irq(&phba->hbalock);
7111 ras_fwlog->state = REG_INPROGRESS;
7112 spin_unlock_irq(&phba->hbalock);
7113 mbox->vport = phba->pport;
7114 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7115
7116 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7117
7118 if (rc == MBX_NOT_FINISHED) {
7119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7120 "6191 FW-Log Mailbox failed. "
7121 "status %d mbxStatus : x%x", rc,
7122 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7123 mempool_free(mbox, phba->mbox_mem_pool);
7124 rc = -EIO;
7125 goto mem_free;
7126 } else
7127 rc = 0;
7128 mem_free:
7129 if (rc)
7130 lpfc_sli4_ras_dma_free(phba);
7131
7132 return rc;
7133 }
7134
7135 /**
7136 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7137 * @phba: Pointer to HBA context object.
7138 *
7139 * Check if RAS is supported on the adapter and initialize it.
7140 **/
7141 void
7142 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7143 {
7144 /* Check whether RAS FW logging needs to be enabled */
7145 if (lpfc_check_fwlog_support(phba))
7146 return;
7147
7148 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7149 LPFC_RAS_ENABLE_LOGGING);
7150 }
7151
7152 /**
7153 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7154 * @phba: Pointer to HBA context object.
7155 *
7156 * This function allocates all SLI4 resource identifiers.
7157 **/
7158 int
7159 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7160 {
7161 int i, rc, error = 0;
7162 uint16_t count, base;
7163 unsigned long longs;
7164
7165 if (!phba->sli4_hba.rpi_hdrs_in_use)
7166 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7167 if (phba->sli4_hba.extents_in_use) {
7168 /*
7169 * The port supports resource extents. The XRI, VPI, VFI, RPI
7170 * resource extent count must be read and allocated before
7171 * provisioning the resource id arrays.
7172 */
7173 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7174 LPFC_IDX_RSRC_RDY) {
7175 /*
7176 * Extent-based resources are set - the driver could
7177 * be in a port reset. Figure out if any corrective
7178 * actions need to be taken.
7179 */
7180 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7181 LPFC_RSC_TYPE_FCOE_VFI);
7182 if (rc != 0)
7183 error++;
7184 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7185 LPFC_RSC_TYPE_FCOE_VPI);
7186 if (rc != 0)
7187 error++;
7188 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7189 LPFC_RSC_TYPE_FCOE_XRI);
7190 if (rc != 0)
7191 error++;
7192 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7193 LPFC_RSC_TYPE_FCOE_RPI);
7194 if (rc != 0)
7195 error++;
7196
7197 /*
7198 * It's possible that the number of resources
7199 * provided to this port instance changed between
7200 * resets. Detect this condition and reallocate
7201 * resources. Otherwise, there is no action.
7202 */
7203 if (error) {
7204 lpfc_printf_log(phba, KERN_INFO,
7205 LOG_MBOX | LOG_INIT,
7206 "2931 Detected extent resource "
7207 "change. Reallocating all "
7208 "extents.\n");
7209 rc = lpfc_sli4_dealloc_extent(phba,
7210 LPFC_RSC_TYPE_FCOE_VFI);
7211 rc = lpfc_sli4_dealloc_extent(phba,
7212 LPFC_RSC_TYPE_FCOE_VPI);
7213 rc = lpfc_sli4_dealloc_extent(phba,
7214 LPFC_RSC_TYPE_FCOE_XRI);
7215 rc = lpfc_sli4_dealloc_extent(phba,
7216 LPFC_RSC_TYPE_FCOE_RPI);
7217 } else
7218 return 0;
7219 }
7220
7221 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7222 if (unlikely(rc))
7223 goto err_exit;
7224
7225 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7226 if (unlikely(rc))
7227 goto err_exit;
7228
7229 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7230 if (unlikely(rc))
7231 goto err_exit;
7232
7233 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7234 if (unlikely(rc))
7235 goto err_exit;
7236 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7237 LPFC_IDX_RSRC_RDY);
7238 return rc;
7239 } else {
7240 /*
7241 * The port does not support resource extents. The XRI, VPI,
7242 * VFI, RPI resource ids were determined from READ_CONFIG.
7243 * Just allocate the bitmasks and provision the resource id
7244 * arrays. If a port reset is active, the resources don't
7245 * need any action - just exit.
7246 */
7247 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7248 LPFC_IDX_RSRC_RDY) {
7249 lpfc_sli4_dealloc_resource_identifiers(phba);
7250 lpfc_sli4_remove_rpis(phba);
7251 }
7252 /* RPIs. */
7253 count = phba->sli4_hba.max_cfg_param.max_rpi;
7254 if (count <= 0) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7256 "3279 Invalid provisioning of "
7257 "rpi:%d\n", count);
7258 rc = -EINVAL;
7259 goto err_exit;
7260 }
7261 base = phba->sli4_hba.max_cfg_param.rpi_base;
7262 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
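/*
 * Editor's note: the line above rounds up to whole bitmask words,
 * e.g. count = 100 on a 64-bit host gives longs = (100 + 63) / 64 = 2.
 * The same computation is repeated for the VPI/XRI/VFI pools below.
 */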
7263 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7264 sizeof(unsigned long),
7265 GFP_KERNEL);
7266 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7267 rc = -ENOMEM;
7268 goto err_exit;
7269 }
7270 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7271 GFP_KERNEL);
7272 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7273 rc = -ENOMEM;
7274 goto free_rpi_bmask;
7275 }
7276
7277 for (i = 0; i < count; i++)
7278 phba->sli4_hba.rpi_ids[i] = base + i;
7279
7280 /* VPIs. */
7281 count = phba->sli4_hba.max_cfg_param.max_vpi;
7282 if (count <= 0) {
7283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7284 "3280 Invalid provisioning of "
7285 "vpi:%d\n", count);
7286 rc = -EINVAL;
7287 goto free_rpi_ids;
7288 }
7289 base = phba->sli4_hba.max_cfg_param.vpi_base;
7290 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7291 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7292 GFP_KERNEL);
7293 if (unlikely(!phba->vpi_bmask)) {
7294 rc = -ENOMEM;
7295 goto free_rpi_ids;
7296 }
7297 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7298 GFP_KERNEL);
7299 if (unlikely(!phba->vpi_ids)) {
7300 rc = -ENOMEM;
7301 goto free_vpi_bmask;
7302 }
7303
7304 for (i = 0; i < count; i++)
7305 phba->vpi_ids[i] = base + i;
7306
7307 /* XRIs. */
7308 count = phba->sli4_hba.max_cfg_param.max_xri;
7309 if (count <= 0) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7311 "3281 Invalid provisioning of "
7312 "xri:%d\n", count);
7313 rc = -EINVAL;
7314 goto free_vpi_ids;
7315 }
7316 base = phba->sli4_hba.max_cfg_param.xri_base;
7317 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7318 phba->sli4_hba.xri_bmask = kcalloc(longs,
7319 sizeof(unsigned long),
7320 GFP_KERNEL);
7321 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7322 rc = -ENOMEM;
7323 goto free_vpi_ids;
7324 }
7325 phba->sli4_hba.max_cfg_param.xri_used = 0;
7326 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7327 GFP_KERNEL);
7328 if (unlikely(!phba->sli4_hba.xri_ids)) {
7329 rc = -ENOMEM;
7330 goto free_xri_bmask;
7331 }
7332
7333 for (i = 0; i < count; i++)
7334 phba->sli4_hba.xri_ids[i] = base + i;
7335
7336 /* VFIs. */
7337 count = phba->sli4_hba.max_cfg_param.max_vfi;
7338 if (count <= 0) {
7339 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7340 "3282 Invalid provisioning of "
7341 "vfi:%d\n", count);
7342 rc = -EINVAL;
7343 goto free_xri_ids;
7344 }
7345 base = phba->sli4_hba.max_cfg_param.vfi_base;
7346 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7347 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7348 sizeof(unsigned long),
7349 GFP_KERNEL);
7350 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7351 rc = -ENOMEM;
7352 goto free_xri_ids;
7353 }
7354 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7355 GFP_KERNEL);
7356 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7357 rc = -ENOMEM;
7358 goto free_vfi_bmask;
7359 }
7360
7361 for (i = 0; i < count; i++)
7362 phba->sli4_hba.vfi_ids[i] = base + i;
7363
7364 /*
7365 * Mark all resources ready. An HBA reset doesn't need
7366 * to reset the initialization.
7367 */
7368 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7369 LPFC_IDX_RSRC_RDY);
7370 return 0;
7371 }
7372
7373 free_vfi_bmask:
7374 kfree(phba->sli4_hba.vfi_bmask);
7375 phba->sli4_hba.vfi_bmask = NULL;
7376 free_xri_ids:
7377 kfree(phba->sli4_hba.xri_ids);
7378 phba->sli4_hba.xri_ids = NULL;
7379 free_xri_bmask:
7380 kfree(phba->sli4_hba.xri_bmask);
7381 phba->sli4_hba.xri_bmask = NULL;
7382 free_vpi_ids:
7383 kfree(phba->vpi_ids);
7384 phba->vpi_ids = NULL;
7385 free_vpi_bmask:
7386 kfree(phba->vpi_bmask);
7387 phba->vpi_bmask = NULL;
7388 free_rpi_ids:
7389 kfree(phba->sli4_hba.rpi_ids);
7390 phba->sli4_hba.rpi_ids = NULL;
7391 free_rpi_bmask:
7392 kfree(phba->sli4_hba.rpi_bmask);
7393 phba->sli4_hba.rpi_bmask = NULL;
7394 err_exit:
7395 return rc;
7396 }
7397
7398 /**
7399 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7400 * @phba: Pointer to HBA context object.
7401 *
7402 * This function releases all SLI4 resource identifiers previously
7403 * allocated, for both the extent and non-extent provisioning cases.
7404 **/
7405 int
7406 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7407 {
7408 if (phba->sli4_hba.extents_in_use) {
7409 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7410 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7411 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7412 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7413 } else {
7414 kfree(phba->vpi_bmask);
7415 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7416 kfree(phba->vpi_ids);
7417 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7418 kfree(phba->sli4_hba.xri_bmask);
7419 kfree(phba->sli4_hba.xri_ids);
7420 kfree(phba->sli4_hba.vfi_bmask);
7421 kfree(phba->sli4_hba.vfi_ids);
7422 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7423 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7424 }
7425
7426 return 0;
7427 }
7428
7429 /**
7430 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7431 * @phba: Pointer to HBA context object.
7432 * @type: The resource extent type.
7433 * @extnt_cnt: buffer to hold port extent count response
7434 * @extnt_size: buffer to hold port extent size response.
7435 *
7436 * This function calls the port to read the host allocated extents
7437 * for a particular type.
7438 **/
7439 int
7440 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7441 uint16_t *extnt_cnt, uint16_t *extnt_size)
7442 {
7443 bool emb;
7444 int rc = 0;
7445 uint16_t curr_blks = 0;
7446 uint32_t req_len, emb_len;
7447 uint32_t alloc_len, mbox_tmo;
7448 struct list_head *blk_list_head;
7449 struct lpfc_rsrc_blks *rsrc_blk;
7450 LPFC_MBOXQ_t *mbox;
7451 void *virtaddr = NULL;
7452 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7453 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7454 union lpfc_sli4_cfg_shdr *shdr;
7455
7456 switch (type) {
7457 case LPFC_RSC_TYPE_FCOE_VPI:
7458 blk_list_head = &phba->lpfc_vpi_blk_list;
7459 break;
7460 case LPFC_RSC_TYPE_FCOE_XRI:
7461 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7462 break;
7463 case LPFC_RSC_TYPE_FCOE_VFI:
7464 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7465 break;
7466 case LPFC_RSC_TYPE_FCOE_RPI:
7467 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7468 break;
7469 default:
7470 return -EIO;
7471 }
7472
7473 /* Count the number of extents currently allocated for this type. */
7474 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7475 if (curr_blks == 0) {
7476 /*
7477 * The GET_ALLOCATED mailbox does not return the size,
7478 * just the count. The size should be just the size
7479 * stored in the current allocated block and all sizes
7480 * for an extent type are the same so set the return
7481 * value now.
7482 */
7483 *extnt_size = rsrc_blk->rsrc_size;
7484 }
7485 curr_blks++;
7486 }
7487
7488 /*
7489 * Calculate the size of an embedded mailbox. The uint32_t
7490 * accounts for the extents-specific word.
7491 */
7492 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7493 sizeof(uint32_t);
7494
7495 /*
7496 * Presume the allocation and response will fit into an embedded
7497 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7498 */
7499 emb = LPFC_SLI4_MBX_EMBED;
7500 req_len = emb_len;
7501 if (req_len > emb_len) {
7502 req_len = curr_blks * sizeof(uint16_t) +
7503 sizeof(union lpfc_sli4_cfg_shdr) +
7504 sizeof(uint32_t);
7505 emb = LPFC_SLI4_MBX_NEMBED;
7506 }
7507
7508 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7509 if (!mbox)
7510 return -ENOMEM;
7511 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7512
7513 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7514 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7515 req_len, emb);
7516 if (alloc_len < req_len) {
7517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7518 "2983 Allocated DMA memory size (x%x) is "
7519 "less than the requested DMA memory "
7520 "size (x%x)\n", alloc_len, req_len);
7521 rc = -ENOMEM;
7522 goto err_exit;
7523 }
7524 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7525 if (unlikely(rc)) {
7526 rc = -EIO;
7527 goto err_exit;
7528 }
7529
7530 if (!phba->sli4_hba.intr_enable)
7531 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7532 else {
7533 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7534 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7535 }
7536
7537 if (unlikely(rc)) {
7538 rc = -EIO;
7539 goto err_exit;
7540 }
7541
7542 /*
7543 * Figure out where the response is located. Then get local pointers
7544 * to the response data. The port does not guarantee a response for
7545 * every extent count requested, so update the local variable with the
7546 * allocated count actually returned by the port.
7547 */
7548 if (emb == LPFC_SLI4_MBX_EMBED) {
7549 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7550 shdr = &rsrc_ext->header.cfg_shdr;
7551 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7552 } else {
7553 virtaddr = mbox->sge_array->addr[0];
7554 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7555 shdr = &n_rsrc->cfg_shdr;
7556 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7557 }
7558
7559 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7560 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7561 "2984 Failed to read allocated resources "
7562 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7563 type,
7564 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7565 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7566 rc = -EIO;
7567 goto err_exit;
7568 }
7569 err_exit:
7570 lpfc_sli4_mbox_cmd_free(phba, mbox);
7571 return rc;
7572 }
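/*
 * Editor's note: a hedged usage sketch; a caller wanting the XRI extents
 * currently held by this function instance might do:
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size);
 *	if (!rc)
 *		total_xri = ext_cnt * ext_size;
 *
 * Note that ext_size comes from the driver's own block list; only the extent
 * count is returned by the port.
 */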
7573
7574 /**
7575 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7576 * @phba: pointer to lpfc hba data structure.
7577 * @sgl_list: linked link of sgl buffers to post
7578 * @cnt: number of linked list buffers
7579 *
7580 * This routine walks the list of buffers that have been allocated and
7581 * repost them to the port by using SGL block post. This is needed after a
7582 * pci_function_reset/warm_start or start. It attempts to construct blocks
7583 * of buffer sgls containing contiguous xris and uses the non-embedded
7584 * SGL block post mailbox commands to post them to the port. For single
7585 * buffer sgls with non-contiguous xris, if any, it uses the embedded SGL
7586 * post mailbox command instead.
7587 *
7588 * Returns: 0 = success, non-zero failure.
7589 **/
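/*
 * Editor's note: a worked example of the xri-hole logic in the routine below.
 * For sgls with xritags 10, 11, 12, 20, 21: the jump from 12 to 20 closes a
 * block of three contiguous sgls, which is posted with the non-embedded SGL
 * block mailbox command; 20 and 21 then seed the next posting block.
 */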
7590 static int
7591 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7592 struct list_head *sgl_list, int cnt)
7593 {
7594 struct lpfc_sglq *sglq_entry = NULL;
7595 struct lpfc_sglq *sglq_entry_next = NULL;
7596 struct lpfc_sglq *sglq_entry_first = NULL;
7597 int status, total_cnt;
7598 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7599 int last_xritag = NO_XRI;
7600 LIST_HEAD(prep_sgl_list);
7601 LIST_HEAD(blck_sgl_list);
7602 LIST_HEAD(allc_sgl_list);
7603 LIST_HEAD(post_sgl_list);
7604 LIST_HEAD(free_sgl_list);
7605
7606 spin_lock_irq(&phba->hbalock);
7607 spin_lock(&phba->sli4_hba.sgl_list_lock);
7608 list_splice_init(sgl_list, &allc_sgl_list);
7609 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7610 spin_unlock_irq(&phba->hbalock);
7611
7612 total_cnt = cnt;
7613 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7614 &allc_sgl_list, list) {
7615 list_del_init(&sglq_entry->list);
7616 block_cnt++;
7617 if ((last_xritag != NO_XRI) &&
7618 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7619 /* a hole in xri block, form a sgl posting block */
7620 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7621 post_cnt = block_cnt - 1;
7622 /* prepare list for next posting block */
7623 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7624 block_cnt = 1;
7625 } else {
7626 /* prepare list for next posting block */
7627 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7628 /* enough sgls for non-embed sgl mbox command */
7629 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7630 list_splice_init(&prep_sgl_list,
7631 &blck_sgl_list);
7632 post_cnt = block_cnt;
7633 block_cnt = 0;
7634 }
7635 }
7636 num_posted++;
7637
7638 /* keep track of last sgl's xritag */
7639 last_xritag = sglq_entry->sli4_xritag;
7640
7641 /* end of repost sgl list condition for buffers */
7642 if (num_posted == total_cnt) {
7643 if (post_cnt == 0) {
7644 list_splice_init(&prep_sgl_list,
7645 &blck_sgl_list);
7646 post_cnt = block_cnt;
7647 } else if (block_cnt == 1) {
7648 status = lpfc_sli4_post_sgl(phba,
7649 sglq_entry->phys, 0,
7650 sglq_entry->sli4_xritag);
7651 if (!status) {
7652 /* successful, put sgl to posted list */
7653 list_add_tail(&sglq_entry->list,
7654 &post_sgl_list);
7655 } else {
7656 /* Failure, put sgl to free list */
7657 lpfc_printf_log(phba, KERN_WARNING,
7658 LOG_SLI,
7659 "3159 Failed to post "
7660 "sgl, xritag:x%x\n",
7661 sglq_entry->sli4_xritag);
7662 list_add_tail(&sglq_entry->list,
7663 &free_sgl_list);
7664 total_cnt--;
7665 }
7666 }
7667 }
7668
7669 /* continue until a nembed page worth of sgls */
7670 if (post_cnt == 0)
7671 continue;
7672
7673 /* post the buffer list sgls as a block */
7674 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7675 post_cnt);
7676
7677 if (!status) {
7678 /* success, put sgl list to posted sgl list */
7679 list_splice_init(&blck_sgl_list, &post_sgl_list);
7680 } else {
7681 /* Failure, put sgl list to free sgl list */
7682 sglq_entry_first = list_first_entry(&blck_sgl_list,
7683 struct lpfc_sglq,
7684 list);
7685 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7686 "3160 Failed to post sgl-list, "
7687 "xritag:x%x-x%x\n",
7688 sglq_entry_first->sli4_xritag,
7689 (sglq_entry_first->sli4_xritag +
7690 post_cnt - 1));
7691 list_splice_init(&blck_sgl_list, &free_sgl_list);
7692 total_cnt -= post_cnt;
7693 }
7694
7695 /* don't reset xritag due to hole in xri block */
7696 if (block_cnt == 0)
7697 last_xritag = NO_XRI;
7698
7699 /* reset sgl post count for next round of posting */
7700 post_cnt = 0;
7701 }
7702
7703 /* free the sgls failed to post */
7704 lpfc_free_sgl_list(phba, &free_sgl_list);
7705
7706 /* push sgls posted to the available list */
7707 if (!list_empty(&post_sgl_list)) {
7708 spin_lock_irq(&phba->hbalock);
7709 spin_lock(&phba->sli4_hba.sgl_list_lock);
7710 list_splice_init(&post_sgl_list, sgl_list);
7711 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7712 spin_unlock_irq(&phba->hbalock);
7713 } else {
7714 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7715 "3161 Failure to post sgl to port.\n");
7716 return -EIO;
7717 }
7718
7719 /* return the number of XRIs actually posted */
7720 return total_cnt;
7721 }
7722
7723 /**
7724 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7725 * @phba: pointer to lpfc hba data structure.
7726 *
7727 * This routine walks the list of nvme buffers that have been allocated and
7728 * repost them to the port by using SGL block post. This is needed after a
7729 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7730 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7731 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7732 *
7733 * Returns: 0 = success, non-zero failure.
7734 **/
7735 static int
7736 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7737 {
7738 LIST_HEAD(post_nblist);
7739 int num_posted, rc = 0;
7740
7741 /* gather all NVME buffers that need reposting onto a local list */
7742 lpfc_io_buf_flush(phba, &post_nblist);
7743
7744 /* post the list of nvme buffer sgls to port if available */
7745 if (!list_empty(&post_nblist)) {
7746 num_posted = lpfc_sli4_post_io_sgl_list(
7747 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7748 /* failed to post any nvme buffer, return error */
7749 if (num_posted == 0)
7750 rc = -EIO;
7751 }
7752 return rc;
7753 }
7754
7755 static void
7756 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7757 {
7758 uint32_t len;
7759
7760 len = sizeof(struct lpfc_mbx_set_host_data) -
7761 sizeof(struct lpfc_sli4_cfg_mhdr);
7762 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7763 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7764 LPFC_SLI4_MBX_EMBED);
7765
7766 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7767 mbox->u.mqe.un.set_host_data.param_len =
7768 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7769 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7770 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7771 "Linux %s v"LPFC_DRIVER_VERSION,
7772 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7773 }
7774
7775 int
7776 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7777 struct lpfc_queue *drq, int count, int idx)
7778 {
7779 int rc, i;
7780 struct lpfc_rqe hrqe;
7781 struct lpfc_rqe drqe;
7782 struct lpfc_rqb *rqbp;
7783 unsigned long flags;
7784 struct rqb_dmabuf *rqb_buffer;
7785 LIST_HEAD(rqb_buf_list);
7786
7787 rqbp = hrq->rqbp;
7788 for (i = 0; i < count; i++) {
7789 spin_lock_irqsave(&phba->hbalock, flags);
7790 /* IF RQ is already full, don't bother */
7791 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7792 spin_unlock_irqrestore(&phba->hbalock, flags);
7793 break;
7794 }
7795 spin_unlock_irqrestore(&phba->hbalock, flags);
7796
7797 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7798 if (!rqb_buffer)
7799 break;
7800 rqb_buffer->hrq = hrq;
7801 rqb_buffer->drq = drq;
7802 rqb_buffer->idx = idx;
7803 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7804 }
7805
7806 spin_lock_irqsave(&phba->hbalock, flags);
7807 while (!list_empty(&rqb_buf_list)) {
7808 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7809 hbuf.list);
7810
7811 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7812 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7813 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7814 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7815 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7816 if (rc < 0) {
7817 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7818 "6421 Cannot post to HRQ %d: %x %x %x "
7819 "DRQ %x %x\n",
7820 hrq->queue_id,
7821 hrq->host_index,
7822 hrq->hba_index,
7823 hrq->entry_count,
7824 drq->host_index,
7825 drq->hba_index);
7826 rqbp->rqb_free_buffer(phba, rqb_buffer);
7827 } else {
7828 list_add_tail(&rqb_buffer->hbuf.list,
7829 &rqbp->rqb_buffer_list);
7830 rqbp->buffer_count++;
7831 }
7832 }
7833 spin_unlock_irqrestore(&phba->hbalock, flags);
7834 return 1;
7835 }
7836
7837 static void
7838 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7839 {
7840 union lpfc_sli4_cfg_shdr *shdr;
7841 u32 shdr_status, shdr_add_status;
7842
7843 shdr = (union lpfc_sli4_cfg_shdr *)
7844 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7845 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7846 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7847 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7848 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
7849 "4622 SET_FEATURE (x%x) mbox failed, "
7850 "status x%x add_status x%x, mbx status x%x\n",
7851 LPFC_SET_LD_SIGNAL, shdr_status,
7852 shdr_add_status, pmb->u.mb.mbxStatus);
7853 phba->degrade_activate_threshold = 0;
7854 phba->degrade_deactivate_threshold = 0;
7855 phba->fec_degrade_interval = 0;
7856 goto out;
7857 }
7858
7859 phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
7860 phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
7861 phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
7862
7863 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
7864 "4624 Success: da x%x dd x%x interval x%x\n",
7865 phba->degrade_activate_threshold,
7866 phba->degrade_deactivate_threshold,
7867 phba->fec_degrade_interval);
7868 out:
7869 mempool_free(pmb, phba->mbox_mem_pool);
7870 }
7871
7872 int
7873 lpfc_read_lds_params(struct lpfc_hba *phba)
7874 {
7875 LPFC_MBOXQ_t *mboxq;
7876 int rc;
7877
7878 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7879 if (!mboxq)
7880 return -ENOMEM;
7881
7882 lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
7883 mboxq->vport = phba->pport;
7884 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
7885 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7886 if (rc == MBX_NOT_FINISHED) {
7887 mempool_free(mboxq, phba->mbox_mem_pool);
7888 return -EIO;
7889 }
7890 return 0;
7891 }
7892
7893 static void
7894 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7895 {
7896 struct lpfc_vport *vport = pmb->vport;
7897 union lpfc_sli4_cfg_shdr *shdr;
7898 u32 shdr_status, shdr_add_status;
7899 u32 sig, acqe;
7900
7901 /* Two outcomes. (1) Set features was successful and EDC negotiation
7902 * is done. (2) The mailbox failed, so fall back to FPIN support only.
7903 */
7904 shdr = (union lpfc_sli4_cfg_shdr *)
7905 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7906 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7907 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7908 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7910 "2516 CGN SET_FEATURE mbox failed with "
7911 "status x%x add_status x%x, mbx status x%x "
7912 "Reset Congestion to FPINs only\n",
7913 shdr_status, shdr_add_status,
7914 pmb->u.mb.mbxStatus);
7915 /* If there is a mbox error, move on to RDF */
7916 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7917 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7918 goto out;
7919 }
7920
7921 /* Zero out Congestion Signal ACQE counter */
7922 phba->cgn_acqe_cnt = 0;
7923
7924 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7925 &pmb->u.mqe.un.set_feature);
7926 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7927 &pmb->u.mqe.un.set_feature);
7928 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7929 "4620 SET_FEATURES Success: Freq: %ds %dms "
7930 " Reg: x%x x%x\n", acqe, sig,
7931 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7932 out:
7933 mempool_free(pmb, phba->mbox_mem_pool);
7934
7935 /* Register for FPIN events from the fabric now that the
7936 * EDC common_set_features has completed.
7937 */
7938 lpfc_issue_els_rdf(vport, 0);
7939 }
7940
7941 int
7942 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7943 {
7944 LPFC_MBOXQ_t *mboxq;
7945 u32 rc;
7946
7947 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7948 if (!mboxq)
7949 goto out_rdf;
7950
7951 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7952 mboxq->vport = phba->pport;
7953 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7954
7955 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7956 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7957 "Reg: x%x x%x\n",
7958 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7959 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7960
7961 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7962 if (rc == MBX_NOT_FINISHED)
7963 goto out;
7964 return 0;
7965
7966 out:
7967 mempool_free(mboxq, phba->mbox_mem_pool);
7968 out_rdf:
7969 /* If there is a mbox error, move on to RDF */
7970 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7971 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7972 lpfc_issue_els_rdf(phba->pport, 0);
7973 return -EIO;
7974 }
7975
7976 /**
7977 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7978 * @phba: pointer to lpfc hba data structure.
7979 *
7980 * This routine initializes the per-cq idle_stat to dynamically dictate
7981 * polling decisions.
7982 *
7983 * Return codes:
7984 * None
7985 **/
7986 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7987 {
7988 int i;
7989 struct lpfc_sli4_hdw_queue *hdwq;
7990 struct lpfc_queue *cq;
7991 struct lpfc_idle_stat *idle_stat;
7992 u64 wall;
7993
7994 for_each_present_cpu(i) {
7995 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7996 cq = hdwq->io_cq;
7997
7998 /* Skip if we've already handled this cq's primary CPU */
7999 if (cq->chann != i)
8000 continue;
8001
8002 idle_stat = &phba->sli4_hba.idle_stat[i];
8003
8004 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
8005 idle_stat->prev_wall = wall;
8006
8007 if (phba->nvmet_support ||
8008 phba->cmf_active_mode != LPFC_CFG_OFF)
8009 cq->poll_mode = LPFC_QUEUE_WORK;
8010 else
8011 cq->poll_mode = LPFC_IRQ_POLL;
8012 }
8013
8014 if (!phba->nvmet_support)
8015 schedule_delayed_work(&phba->idle_stat_delay_work,
8016 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
8017 }
8018
8019 static void lpfc_sli4_dip(struct lpfc_hba *phba)
8020 {
8021 uint32_t if_type;
8022
8023 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8024 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
8025 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
8026 struct lpfc_register reg_data;
8027
8028 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8029 &reg_data.word0))
8030 return;
8031
8032 if (bf_get(lpfc_sliport_status_dip, &reg_data))
8033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8034 "2904 Firmware Dump Image Present"
8035 " on Adapter");
8036 }
8037 }
8038
8039 /**
8040 * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
8041 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8042 * @entries: Number of rx_info_entry objects to allocate in ring
8043 *
8044 * Return:
8045 * 0 - Success
8046 * -ENOMEM - Failure to kmalloc
8047 **/
8048 int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
8049 u32 entries)
8050 {
8051 rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
8052 GFP_KERNEL);
8053 if (!rx_monitor->ring)
8054 return -ENOMEM;
8055
8056 rx_monitor->head_idx = 0;
8057 rx_monitor->tail_idx = 0;
8058 spin_lock_init(&rx_monitor->lock);
8059 rx_monitor->entries = entries;
8060
8061 return 0;
8062 }
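/*
 * Editor's note: the rx_monitor lifecycle as a hedged sketch of how these
 * helpers compose (the real call sites live in the CMF setup and timer
 * paths):
 *
 *	struct lpfc_rx_info_monitor *mon;
 *	struct rx_info_entry entry = { 0 };
 *
 *	mon = kzalloc(sizeof(*mon), GFP_KERNEL);
 *	if (!mon || lpfc_rx_monitor_create_ring(mon, 1024))
 *		goto fail;
 *	lpfc_rx_monitor_record(mon, &entry);		<- from the CMF timer
 *	lpfc_rx_monitor_report(phba, mon, NULL, 0, 32);	<- dump to kmsg
 *	lpfc_rx_monitor_destroy_ring(mon);
 *	kfree(mon);
 */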
8063
8064 /**
8065 * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
8066 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8067 **/
8068 void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
8069 {
8070 spin_lock(&rx_monitor->lock);
8071 kfree(rx_monitor->ring);
8072 rx_monitor->ring = NULL;
8073 rx_monitor->entries = 0;
8074 rx_monitor->head_idx = 0;
8075 rx_monitor->tail_idx = 0;
8076 spin_unlock(&rx_monitor->lock);
8077 }
8078
8079 /**
8080 * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
8081 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8082 * @entry: Pointer to rx_info_entry
8083 *
8084 * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
8085 * deep copy of rx_info_entry, not a shallow copy of the rx_info_entry ptr.
8086 *
8087 * This is called from lpfc_cmf_timer, which is in timer/softirq context.
8088 *
8089 * In cases of old data overflow, we do a best effort of FIFO order.
8090 **/
8091 void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
8092 struct rx_info_entry *entry)
8093 {
8094 struct rx_info_entry *ring = rx_monitor->ring;
8095 u32 *head_idx = &rx_monitor->head_idx;
8096 u32 *tail_idx = &rx_monitor->tail_idx;
8097 spinlock_t *ring_lock = &rx_monitor->lock;
8098 u32 ring_size = rx_monitor->entries;
8099
8100 spin_lock(ring_lock);
8101 memcpy(&ring[*tail_idx], entry, sizeof(*entry));
8102 *tail_idx = (*tail_idx + 1) % ring_size;
8103
8104 /* Best effort of FIFO saved data */
8105 if (*tail_idx == *head_idx)
8106 *head_idx = (*head_idx + 1) % ring_size;
8107
8108 spin_unlock(ring_lock);
8109 }
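/*
 * Editor's note: wraparound behavior of the routine above, assuming
 * entries = 4. After the fourth record, tail_idx wraps to 0 and collides
 * with head_idx, so head_idx is advanced to 1; from then on each new record
 * overwrites the oldest slot and the ring retains the newest (entries - 1)
 * samples in FIFO order.
 */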
8110
8111 /**
8112 * lpfc_rx_monitor_report - Read out rx_monitor's ring
8113 * @phba: Pointer to lpfc_hba object
8114 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8115 * @buf: Pointer to char buffer that will contain rx monitor info data
8116 * @buf_len: Length of buf, including the terminating null char
8117 * @max_read_entries: Maximum number of entries to read out of ring
8118 *
8119 * Used to dump/read what's in rx_monitor's ring buffer.
8120 *
8121 * If buf is NULL || buf_len == 0, then it is implied that we want to log the
8122 * information to kmsg instead of filling out buf.
8123 *
8124 * Return:
8125 * Number of entries read out of the ring
8126 **/
8127 u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
8128 struct lpfc_rx_info_monitor *rx_monitor, char *buf,
8129 u32 buf_len, u32 max_read_entries)
8130 {
8131 struct rx_info_entry *ring = rx_monitor->ring;
8132 struct rx_info_entry *entry;
8133 u32 *head_idx = &rx_monitor->head_idx;
8134 u32 *tail_idx = &rx_monitor->tail_idx;
8135 spinlock_t *ring_lock = &rx_monitor->lock;
8136 u32 ring_size = rx_monitor->entries;
8137 u32 cnt = 0;
8138 char tmp[DBG_LOG_STR_SZ] = {0};
8139 bool log_to_kmsg = (!buf || !buf_len) ? true : false;
8140
8141 if (!log_to_kmsg) {
8142 /* clear the buffer to be sure */
8143 memset(buf, 0, buf_len);
8144
8145 scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
8146 "%-8s%-8s%-8s%-16s\n",
8147 "MaxBPI", "Tot_Data_CMF",
8148 "Tot_Data_Cmd", "Tot_Data_Cmpl",
8149 "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
8150 "IO_cnt", "Info", "BWutil(ms)");
8151 }
8152
8153 /* Needs to be _irq because record is called from timer interrupt
8154 * context
8155 */
8156 spin_lock_irq(ring_lock);
8157 while (*head_idx != *tail_idx) {
8158 entry = &ring[*head_idx];
8159
8160 /* Read out this entry's data. */
8161 if (!log_to_kmsg) {
8162 /* If !log_to_kmsg, then store to buf. */
8163 scnprintf(tmp, sizeof(tmp),
8164 "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
8165 "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
8166 *head_idx, entry->max_bytes_per_interval,
8167 entry->cmf_bytes, entry->total_bytes,
8168 entry->rcv_bytes, entry->avg_io_latency,
8169 entry->avg_io_size, entry->max_read_cnt,
8170 entry->cmf_busy, entry->io_cnt,
8171 entry->cmf_info, entry->timer_utilization,
8172 entry->timer_interval);
8173
8174 /* Check for buffer overflow */
8175 if ((strlen(buf) + strlen(tmp)) >= buf_len)
8176 break;
8177
8178 /* Append entry's data to buffer */
8179 strlcat(buf, tmp, buf_len);
8180 } else {
8181 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
8182 "4410 %02u: MBPI %llu Xmit %llu "
8183 "Cmpl %llu Lat %llu ASz %llu Info %02u "
8184 "BWUtil %u Int %u slot %u\n",
8185 cnt, entry->max_bytes_per_interval,
8186 entry->total_bytes, entry->rcv_bytes,
8187 entry->avg_io_latency,
8188 entry->avg_io_size, entry->cmf_info,
8189 entry->timer_utilization,
8190 entry->timer_interval, *head_idx);
8191 }
8192
8193 *head_idx = (*head_idx + 1) % ring_size;
8194
8195 /* Don't feed more than max_read_entries */
8196 cnt++;
8197 if (cnt >= max_read_entries)
8198 break;
8199 }
8200 spin_unlock_irq(ring_lock);
8201
8202 return cnt;
8203 }
8204
8205 /**
8206 * lpfc_cmf_setup - Initialize congestion management and MI support
8207 * @phba: Pointer to HBA context object.
8208 *
8209 * This is called from HBA setup during driver load or when the HBA
8210 * comes online. This does all the initialization to support CMF and MI.
8211 **/
8212 static int
8213 lpfc_cmf_setup(struct lpfc_hba *phba)
8214 {
8215 LPFC_MBOXQ_t *mboxq;
8216 struct lpfc_dmabuf *mp;
8217 struct lpfc_pc_sli4_params *sli4_params;
8218 int rc, cmf, mi_ver;
8219
8220 rc = lpfc_sli4_refresh_params(phba);
8221 if (unlikely(rc))
8222 return rc;
8223
8224 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8225 if (!mboxq)
8226 return -ENOMEM;
8227
8228 sli4_params = &phba->sli4_hba.pc_sli4_params;
8229
8230 /* Always try to enable MI feature if we can */
8231 if (sli4_params->mi_ver) {
8232 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
8233 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8234 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
8235 &mboxq->u.mqe.un.set_feature);
8236
8237 if (rc == MBX_SUCCESS) {
8238 if (mi_ver) {
8239 lpfc_printf_log(phba,
8240 KERN_WARNING, LOG_CGN_MGMT,
8241 "6215 MI is enabled\n");
8242 sli4_params->mi_ver = mi_ver;
8243 } else {
8244 lpfc_printf_log(phba,
8245 KERN_WARNING, LOG_CGN_MGMT,
8246 "6338 MI is disabled\n");
8247 sli4_params->mi_ver = 0;
8248 }
8249 } else {
8250 /* mi_ver is already set from GET_SLI4_PARAMETERS */
8251 lpfc_printf_log(phba, KERN_INFO,
8252 LOG_CGN_MGMT | LOG_INIT,
8253 "6245 Enable MI Mailbox x%x (x%x/x%x) "
8254 "failed, rc:x%x mi:x%x\n",
8255 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8256 lpfc_sli_config_mbox_subsys_get
8257 (phba, mboxq),
8258 lpfc_sli_config_mbox_opcode_get
8259 (phba, mboxq),
8260 rc, sli4_params->mi_ver);
8261 }
8262 } else {
8263 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8264 "6217 MI is disabled\n");
8265 }
8266
8267 /* Ensure FDMI is enabled for MI if enable_mi is set */
8268 if (sli4_params->mi_ver)
8269 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8270
8271 /* Always try to enable CMF feature if we can */
8272 if (sli4_params->cmf) {
8273 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8274 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8275 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8276 &mboxq->u.mqe.un.set_feature);
8277 if (rc == MBX_SUCCESS && cmf) {
8278 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8279 "6218 CMF is enabled: mode %d\n",
8280 phba->cmf_active_mode);
8281 } else {
8282 lpfc_printf_log(phba, KERN_WARNING,
8283 LOG_CGN_MGMT | LOG_INIT,
8284 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8285 "failed, rc:x%x dd:x%x\n",
8286 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8287 lpfc_sli_config_mbox_subsys_get
8288 (phba, mboxq),
8289 lpfc_sli_config_mbox_opcode_get
8290 (phba, mboxq),
8291 rc, cmf);
8292 sli4_params->cmf = 0;
8293 phba->cmf_active_mode = LPFC_CFG_OFF;
8294 goto no_cmf;
8295 }
8296
8297 /* Allocate Congestion Information Buffer */
8298 if (!phba->cgn_i) {
8299 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8300 if (mp)
8301 mp->virt = dma_alloc_coherent
8302 (&phba->pcidev->dev,
8303 sizeof(struct lpfc_cgn_info),
8304 &mp->phys, GFP_KERNEL);
8305 if (!mp || !mp->virt) {
8306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8307 "2640 Failed to alloc memory "
8308 "for Congestion Info\n");
8309 kfree(mp);
8310 sli4_params->cmf = 0;
8311 phba->cmf_active_mode = LPFC_CFG_OFF;
8312 goto no_cmf;
8313 }
8314 phba->cgn_i = mp;
8315
8316 /* initialize congestion buffer info */
8317 lpfc_init_congestion_buf(phba);
8318 lpfc_init_congestion_stat(phba);
8319
8320 /* Zero out Congestion Signal counters */
8321 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8322 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8323 }
8324
8325 rc = lpfc_sli4_cgn_params_read(phba);
8326 if (rc < 0) {
8327 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8328 "6242 Error reading Cgn Params (%d)\n",
8329 rc);
8330 /* Ensure CGN Mode is off */
8331 sli4_params->cmf = 0;
8332 } else if (!rc) {
8333 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8334 "6243 CGN Event empty object.\n");
8335 /* Ensure CGN Mode is off */
8336 sli4_params->cmf = 0;
8337 }
8338 } else {
8339 no_cmf:
8340 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8341 "6220 CMF is disabled\n");
8342 }
8343
8344 /* Only register congestion buffer with firmware if BOTH
8345 * CMF and E2E are enabled.
8346 */
8347 if (sli4_params->cmf && sli4_params->mi_ver) {
8348 rc = lpfc_reg_congestion_buf(phba);
8349 if (rc) {
8350 dma_free_coherent(&phba->pcidev->dev,
8351 sizeof(struct lpfc_cgn_info),
8352 phba->cgn_i->virt, phba->cgn_i->phys);
8353 kfree(phba->cgn_i);
8354 phba->cgn_i = NULL;
8355 /* Ensure CGN Mode is off */
8356 phba->cmf_active_mode = LPFC_CFG_OFF;
8357 return 0;
8358 }
8359 }
8360 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8361 "6470 Setup MI version %d CMF %d mode %d\n",
8362 sli4_params->mi_ver, sli4_params->cmf,
8363 phba->cmf_active_mode);
8364
8365 mempool_free(mboxq, phba->mbox_mem_pool);
8366
8367 /* Initialize atomic counters */
8368 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8369 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8370 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8371 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8372 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8373 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8374 atomic64_set(&phba->cgn_latency_evt, 0);
8375
8376 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8377
8378 /* Allocate RX Monitor Buffer */
8379 if (!phba->rx_monitor) {
8380 phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
8381 GFP_KERNEL);
8382
8383 if (!phba->rx_monitor) {
8384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8385 "2644 Failed to alloc memory "
8386 "for RX Monitor Buffer\n");
8387 return -ENOMEM;
8388 }
8389
8390 /* Instruct the rx_monitor object to instantiate its ring */
8391 if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
8392 LPFC_MAX_RXMONITOR_ENTRY)) {
8393 kfree(phba->rx_monitor);
8394 phba->rx_monitor = NULL;
8395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8396 "2645 Failed to alloc memory "
8397 "for RX Monitor's Ring\n");
8398 return -ENOMEM;
8399 }
8400 }
8401
8402 return 0;
8403 }
8404
8405 static int
8406 lpfc_set_host_tm(struct lpfc_hba *phba)
8407 {
8408 LPFC_MBOXQ_t *mboxq;
8409 uint32_t len, rc;
8410 struct timespec64 cur_time;
8411 struct tm broken;
8412 uint32_t month, day, year;
8413 uint32_t hour, minute, second;
8414 struct lpfc_mbx_set_host_date_time *tm;
8415
8416 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8417 if (!mboxq)
8418 return -ENOMEM;
8419
8420 len = sizeof(struct lpfc_mbx_set_host_data) -
8421 sizeof(struct lpfc_sli4_cfg_mhdr);
8422 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8423 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8424 LPFC_SLI4_MBX_EMBED);
8425
8426 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8427 mboxq->u.mqe.un.set_host_data.param_len =
8428 sizeof(struct lpfc_mbx_set_host_date_time);
8429 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8430 ktime_get_real_ts64(&cur_time);
8431 time64_to_tm(cur_time.tv_sec, 0, &broken);
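	/* struct tm encodes the month as 0-11 and the year as an offset
	 * from 1900, so convert to a 1-based month and a two-digit year
	 * relative to 2000 (e.g. 2024 -> 24) for the mailbox fields.
	 */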
8432 month = broken.tm_mon + 1;
8433 day = broken.tm_mday;
8434 year = broken.tm_year - 100;
8435 hour = broken.tm_hour;
8436 minute = broken.tm_min;
8437 second = broken.tm_sec;
8438 bf_set(lpfc_mbx_set_host_month, tm, month);
8439 bf_set(lpfc_mbx_set_host_day, tm, day);
8440 bf_set(lpfc_mbx_set_host_year, tm, year);
8441 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8442 bf_set(lpfc_mbx_set_host_min, tm, minute);
8443 bf_set(lpfc_mbx_set_host_sec, tm, second);
8444
8445 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8446 mempool_free(mboxq, phba->mbox_mem_pool);
8447 return rc;
8448 }
8449
8450 /**
8451 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8452 * @phba: Pointer to HBA context object.
8453 *
8454 * This function is the main SLI4 device initialization PCI function. This
8455 * function is called by the HBA initialization code, HBA reset code and
8456 * HBA error attention handler code. Caller is not required to hold any
8457 * locks.
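 *
 * Return: 0 on success; a negative errno or a mailbox status code on
 * failure.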
8458 **/
8459 int
8460 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8461 {
8462 int rc, i, cnt, len, dd;
8463 LPFC_MBOXQ_t *mboxq;
8464 struct lpfc_mqe *mqe;
8465 uint8_t *vpd;
8466 uint32_t vpd_size;
8467 uint32_t ftr_rsp = 0;
8468 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8469 struct lpfc_vport *vport = phba->pport;
8470 struct lpfc_dmabuf *mp;
8471 struct lpfc_rqb *rqbp;
8472 u32 flg;
8473
8474 /* Perform a PCI function reset to start from clean */
8475 rc = lpfc_pci_function_reset(phba);
8476 if (unlikely(rc))
8477 return -ENODEV;
8478
8479 	/* Check the HBA Host Status Register for readiness */
8480 rc = lpfc_sli4_post_status_check(phba);
8481 if (unlikely(rc))
8482 return -ENODEV;
8483 else {
8484 spin_lock_irq(&phba->hbalock);
8485 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8486 flg = phba->sli.sli_flag;
8487 spin_unlock_irq(&phba->hbalock);
8488 		/* Allow a little time (at most 50 * 20ms = 1s) after setting
8489 		 * SLI_ACTIVE for any polled MBX commands to complete via BSG.
8490 		 */
8491 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8492 msleep(20);
8493 spin_lock_irq(&phba->hbalock);
8494 flg = phba->sli.sli_flag;
8495 spin_unlock_irq(&phba->hbalock);
8496 }
8497 }
8498
8499 lpfc_sli4_dip(phba);
8500
8501 /*
8502 * Allocate a single mailbox container for initializing the
8503 * port.
8504 */
8505 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8506 if (!mboxq)
8507 return -ENOMEM;
8508
8509 /* Issue READ_REV to collect vpd and FW information. */
8510 vpd_size = SLI4_PAGE_SIZE;
8511 vpd = kzalloc(vpd_size, GFP_KERNEL);
8512 if (!vpd) {
8513 rc = -ENOMEM;
8514 goto out_free_mbox;
8515 }
8516
8517 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8518 if (unlikely(rc)) {
8519 kfree(vpd);
8520 goto out_free_mbox;
8521 }
8522
8523 mqe = &mboxq->u.mqe;
8524 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8525 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8526 phba->hba_flag |= HBA_FCOE_MODE;
8527 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8528 } else {
8529 phba->hba_flag &= ~HBA_FCOE_MODE;
8530 }
8531
8532 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8533 LPFC_DCBX_CEE_MODE)
8534 phba->hba_flag |= HBA_FIP_SUPPORT;
8535 else
8536 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8537
8538 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8539
8540 if (phba->sli_rev != LPFC_SLI_REV4) {
8541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8542 "0376 READ_REV Error. SLI Level %d "
8543 "FCoE enabled %d\n",
8544 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8545 rc = -EIO;
8546 kfree(vpd);
8547 goto out_free_mbox;
8548 }
8549
8550 rc = lpfc_set_host_tm(phba);
8551 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8552 "6468 Set host date / time: Status x%x:\n", rc);
8553
8554 	/*
8555 	 * Continue initialization with default values even if the driver
8556 	 * failed to read the FCoE param config regions; only read the
8557 	 * parameters if the board is FCoE.
8558 	 */
8559 if (phba->hba_flag & HBA_FCOE_MODE &&
8560 lpfc_sli4_read_fcoe_params(phba))
8561 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8562 "2570 Failed to read FCoE parameters\n");
8563
8564 	/*
8565 	 * Retrieve the SLI4 device physical port name; failure to do so
8566 	 * is considered non-fatal.
8567 	 */
8568 rc = lpfc_sli4_retrieve_pport_name(phba);
8569 if (!rc)
8570 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8571 "3080 Successful retrieving SLI4 device "
8572 "physical port name: %s.\n", phba->Port);
8573
8574 rc = lpfc_sli4_get_ctl_attr(phba);
8575 if (!rc)
8576 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8577 "8351 Successful retrieving SLI4 device "
8578 "CTL ATTR\n");
8579
8580 /*
8581 * Evaluate the read rev and vpd data. Populate the driver
8582 * state with the results. If this routine fails, the failure
8583 * is not fatal as the driver will use generic values.
8584 */
8585 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8586 if (unlikely(!rc)) {
8587 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8588 "0377 Error %d parsing vpd. "
8589 "Using defaults.\n", rc);
8590 rc = 0;
8591 }
8592 kfree(vpd);
8593
8594 /* Save information as VPD data */
8595 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8596 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8597
8598 	/*
8599 	 * The first G7 ASIC doesn't support the standard 0x5a NVME cmd
8600 	 * descriptor type/subtype, so disable embedded NVME commands on it.
8601 	 */
8602 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8603 LPFC_SLI_INTF_IF_TYPE_6) &&
8604 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8605 (phba->vpd.rev.smRev == 0) &&
8606 (phba->cfg_nvme_embed_cmd == 1))
8607 phba->cfg_nvme_embed_cmd = 0;
8608
8609 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8610 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8611 &mqe->un.read_rev);
8612 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8613 &mqe->un.read_rev);
8614 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8615 &mqe->un.read_rev);
8616 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8617 &mqe->un.read_rev);
8618 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8619 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8620 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8621 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8622 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8623 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8624 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8625 "(%d):0380 READ_REV Status x%x "
8626 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8627 mboxq->vport ? mboxq->vport->vpi : 0,
8628 bf_get(lpfc_mqe_status, mqe),
8629 phba->vpd.rev.opFwName,
8630 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8631 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8632
8633 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8634 LPFC_SLI_INTF_IF_TYPE_0) {
8635 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8636 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8637 if (rc == MBX_SUCCESS) {
8638 phba->hba_flag |= HBA_RECOVERABLE_UE;
8639 /* Set 1Sec interval to detect UE */
8640 phba->eratt_poll_interval = 1;
8641 phba->sli4_hba.ue_to_sr = bf_get(
8642 lpfc_mbx_set_feature_UESR,
8643 &mboxq->u.mqe.un.set_feature);
8644 phba->sli4_hba.ue_to_rp = bf_get(
8645 lpfc_mbx_set_feature_UERP,
8646 &mboxq->u.mqe.un.set_feature);
8647 }
8648 }
8649
8650 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8651 /* Enable MDS Diagnostics only if the SLI Port supports it */
8652 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8653 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8654 if (rc != MBX_SUCCESS)
8655 phba->mds_diags_support = 0;
8656 }
8657
8658 /*
8659 * Discover the port's supported feature set and match it against the
8660 	 * host's requests.
8661 */
8662 lpfc_request_features(phba, mboxq);
8663 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8664 if (unlikely(rc)) {
8665 rc = -EIO;
8666 goto out_free_mbox;
8667 }
8668
8669 /* Disable VMID if app header is not supported */
8670 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8671 &mqe->un.req_ftrs))) {
8672 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8673 phba->cfg_vmid_app_header = 0;
8674 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8675 "1242 vmid feature not supported\n");
8676 }
8677
8678 /*
8679 * The port must support FCP initiator mode as this is the
8680 * only mode running in the host.
8681 */
8682 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8683 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8684 "0378 No support for fcpi mode.\n");
8685 ftr_rsp++;
8686 }
8687
8688 /* Performance Hints are ONLY for FCoE */
8689 if (phba->hba_flag & HBA_FCOE_MODE) {
8690 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8691 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8692 else
8693 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8694 }
8695
8696 /*
8697 * If the port cannot support the host's requested features
8698 * then turn off the global config parameters to disable the
8699 * feature in the driver. This is not a fatal error.
8700 */
8701 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8702 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8703 phba->cfg_enable_bg = 0;
8704 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8705 ftr_rsp++;
8706 }
8707 }
8708
8709 if (phba->max_vpi && phba->cfg_enable_npiv &&
8710 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8711 ftr_rsp++;
8712
8713 if (ftr_rsp) {
8714 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8715 "0379 Feature Mismatch Data: x%08x %08x "
8716 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8717 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8718 phba->cfg_enable_npiv, phba->max_vpi);
8719 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8720 phba->cfg_enable_bg = 0;
8721 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8722 phba->cfg_enable_npiv = 0;
8723 }
8724
8725 /* These SLI3 features are assumed in SLI4 */
8726 spin_lock_irq(&phba->hbalock);
8727 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8728 spin_unlock_irq(&phba->hbalock);
8729
8730 /* Always try to enable dual dump feature if we can */
8731 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8732 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8733 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8734 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8736 "6448 Dual Dump is enabled\n");
8737 else
8738 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8739 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8740 "rc:x%x dd:x%x\n",
8741 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8742 lpfc_sli_config_mbox_subsys_get(
8743 phba, mboxq),
8744 lpfc_sli_config_mbox_opcode_get(
8745 phba, mboxq),
8746 rc, dd);
8747 /*
8748 	 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
8749 	 * calls depend on these resources to complete port setup.
8750 */
8751 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8752 if (rc) {
8753 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8754 "2920 Failed to alloc Resource IDs "
8755 "rc = x%x\n", rc);
8756 goto out_free_mbox;
8757 }
8758
8759 lpfc_set_host_data(phba, mboxq);
8760
8761 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8762 if (rc) {
8763 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8764 "2134 Failed to set host os driver version %x",
8765 rc);
8766 }
8767
8768 /* Read the port's service parameters. */
8769 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8770 if (rc) {
8771 phba->link_state = LPFC_HBA_ERROR;
8772 rc = -ENOMEM;
8773 goto out_free_mbox;
8774 }
8775
8776 mboxq->vport = vport;
8777 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8778 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8779 if (rc == MBX_SUCCESS) {
8780 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8781 rc = 0;
8782 }
8783
8784 /*
8785 * This memory was allocated by the lpfc_read_sparam routine but is
8786 * no longer needed. It is released and ctx_buf NULLed to prevent
8787 * unintended pointer access as the mbox is reused.
8788 */
8789 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8790 kfree(mp);
8791 mboxq->ctx_buf = NULL;
8792 if (unlikely(rc)) {
8793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8794 "0382 READ_SPARAM command failed "
8795 "status %d, mbxStatus x%x\n",
8796 rc, bf_get(lpfc_mqe_status, mqe));
8797 phba->link_state = LPFC_HBA_ERROR;
8798 rc = -EIO;
8799 goto out_free_mbox;
8800 }
8801
8802 lpfc_update_vport_wwn(vport);
8803
8804 /* Update the fc_host data structures with new wwn. */
8805 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8806 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8807
8808 /* Create all the SLI4 queues */
8809 rc = lpfc_sli4_queue_create(phba);
8810 if (rc) {
8811 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8812 "3089 Failed to allocate queues\n");
8813 rc = -ENODEV;
8814 goto out_free_mbox;
8815 }
8816 /* Set up all the queues to the device */
8817 rc = lpfc_sli4_queue_setup(phba);
8818 if (unlikely(rc)) {
8819 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8820 "0381 Error %d during queue setup.\n ", rc);
8821 goto out_stop_timers;
8822 }
8823 /* Initialize the driver internal SLI layer lists. */
8824 lpfc_sli4_setup(phba);
8825 lpfc_sli4_queue_init(phba);
8826
8827 /* update host els xri-sgl sizes and mappings */
8828 rc = lpfc_sli4_els_sgl_update(phba);
8829 if (unlikely(rc)) {
8830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8831 "1400 Failed to update xri-sgl size and "
8832 "mapping: %d\n", rc);
8833 goto out_destroy_queue;
8834 }
8835
8836 /* register the els sgl pool to the port */
8837 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8838 phba->sli4_hba.els_xri_cnt);
8839 if (unlikely(rc < 0)) {
8840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8841 "0582 Error %d during els sgl post "
8842 "operation\n", rc);
8843 rc = -ENODEV;
8844 goto out_destroy_queue;
8845 }
8846 phba->sli4_hba.els_xri_cnt = rc;
8847
8848 if (phba->nvmet_support) {
8849 /* update host nvmet xri-sgl sizes and mappings */
8850 rc = lpfc_sli4_nvmet_sgl_update(phba);
8851 if (unlikely(rc)) {
8852 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8853 "6308 Failed to update nvmet-sgl size "
8854 "and mapping: %d\n", rc);
8855 goto out_destroy_queue;
8856 }
8857
8858 /* register the nvmet sgl pool to the port */
8859 rc = lpfc_sli4_repost_sgl_list(
8860 phba,
8861 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8862 phba->sli4_hba.nvmet_xri_cnt);
8863 if (unlikely(rc < 0)) {
8864 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8865 "3117 Error %d during nvmet "
8866 "sgl post\n", rc);
8867 rc = -ENODEV;
8868 goto out_destroy_queue;
8869 }
8870 phba->sli4_hba.nvmet_xri_cnt = rc;
8871
8872 /* We allocate an iocbq for every receive context SGL.
8873 * The additional allocation is for abort and ls handling.
8874 */
8875 cnt = phba->sli4_hba.nvmet_xri_cnt +
8876 phba->sli4_hba.max_cfg_param.max_xri;
8877 } else {
8878 /* update host common xri-sgl sizes and mappings */
8879 rc = lpfc_sli4_io_sgl_update(phba);
8880 if (unlikely(rc)) {
8881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8882 "6082 Failed to update nvme-sgl size "
8883 "and mapping: %d\n", rc);
8884 goto out_destroy_queue;
8885 }
8886
8887 /* register the allocated common sgl pool to the port */
8888 rc = lpfc_sli4_repost_io_sgl_list(phba);
8889 if (unlikely(rc)) {
8890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8891 "6116 Error %d during nvme sgl post "
8892 "operation\n", rc);
8893 /* Some NVME buffers were moved to abort nvme list */
8894 /* A pci function reset will repost them */
8895 rc = -ENODEV;
8896 goto out_destroy_queue;
8897 }
8898 /* Each lpfc_io_buf job structure has an iocbq element.
8899 * This cnt provides for abort, els, ct and ls requests.
8900 */
8901 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8902 }
8903
8904 if (!phba->sli.iocbq_lookup) {
8905 /* Initialize and populate the iocb list per host */
8906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8907 "2821 initialize iocb list with %d entries\n",
8908 cnt);
8909 rc = lpfc_init_iocb_list(phba, cnt);
8910 if (rc) {
8911 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8912 "1413 Failed to init iocb list.\n");
8913 goto out_destroy_queue;
8914 }
8915 }
8916
8917 if (phba->nvmet_support)
8918 lpfc_nvmet_create_targetport(phba);
8919
8920 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8921 /* Post initial buffers to all RQs created */
8922 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8923 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8924 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8925 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8926 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8927 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8928 rqbp->buffer_count = 0;
8929
8930 lpfc_post_rq_buffer(
8931 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8932 phba->sli4_hba.nvmet_mrq_data[i],
8933 phba->cfg_nvmet_mrq_post, i);
8934 }
8935 }
8936
8937 /* Post the rpi header region to the device. */
8938 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8939 if (unlikely(rc)) {
8940 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8941 "0393 Error %d during rpi post operation\n",
8942 rc);
8943 rc = -ENODEV;
8944 goto out_free_iocblist;
8945 }
8946 lpfc_sli4_node_prep(phba);
8947
8948 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8949 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8950 /*
8951 * The FC Port needs to register FCFI (index 0)
8952 */
8953 lpfc_reg_fcfi(phba, mboxq);
8954 mboxq->vport = phba->pport;
8955 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8956 if (rc != MBX_SUCCESS)
8957 goto out_unset_queue;
8958 rc = 0;
8959 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8960 &mboxq->u.mqe.un.reg_fcfi);
8961 } else {
8962 /* We are a NVME Target mode with MRQ > 1 */
8963
8964 /* First register the FCFI */
8965 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8966 mboxq->vport = phba->pport;
8967 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8968 if (rc != MBX_SUCCESS)
8969 goto out_unset_queue;
8970 rc = 0;
8971 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8972 &mboxq->u.mqe.un.reg_fcfi_mrq);
8973
8974 /* Next register the MRQs */
8975 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8976 mboxq->vport = phba->pport;
8977 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8978 if (rc != MBX_SUCCESS)
8979 goto out_unset_queue;
8980 rc = 0;
8981 }
8982 /* Check if the port is configured to be disabled */
8983 lpfc_sli_read_link_ste(phba);
8984 }
8985
8986 /* Don't post more new bufs if repost already recovered
8987 * the nvme sgls.
8988 */
8989 if (phba->nvmet_support == 0) {
8990 if (phba->sli4_hba.io_xri_cnt == 0) {
8991 len = lpfc_new_io_buf(
8992 phba, phba->sli4_hba.io_xri_max);
8993 if (len == 0) {
8994 rc = -ENOMEM;
8995 goto out_unset_queue;
8996 }
8997
8998 if (phba->cfg_xri_rebalancing)
8999 lpfc_create_multixri_pools(phba);
9000 }
9001 } else {
9002 phba->cfg_xri_rebalancing = 0;
9003 }
9004
9005 /* Allow asynchronous mailbox command to go through */
9006 spin_lock_irq(&phba->hbalock);
9007 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9008 spin_unlock_irq(&phba->hbalock);
9009
9010 /* Post receive buffers to the device */
9011 lpfc_sli4_rb_setup(phba);
9012
9013 /* Reset HBA FCF states after HBA reset */
9014 phba->fcf.fcf_flag = 0;
9015 phba->fcf.current_rec.flag = 0;
9016
9017 /* Start the ELS watchdog timer */
9018 mod_timer(&vport->els_tmofunc,
9019 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
9020
9021 /* Start heart beat timer */
9022 mod_timer(&phba->hb_tmofunc,
9023 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
9024 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
9025 phba->last_completion_time = jiffies;
9026
9027 /* start eq_delay heartbeat */
9028 if (phba->cfg_auto_imax)
9029 queue_delayed_work(phba->wq, &phba->eq_delay_work,
9030 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9031
9032 /* start per phba idle_stat_delay heartbeat */
9033 lpfc_init_idle_stat_hb(phba);
9034
9035 /* Start error attention (ERATT) polling timer */
9036 mod_timer(&phba->eratt_poll,
9037 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9038
9039 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
9040 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
9041 rc = pci_enable_pcie_error_reporting(phba->pcidev);
9042 if (!rc) {
9043 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9044 "2829 This device supports "
9045 "Advanced Error Reporting (AER)\n");
9046 spin_lock_irq(&phba->hbalock);
9047 phba->hba_flag |= HBA_AER_ENABLED;
9048 spin_unlock_irq(&phba->hbalock);
9049 } else {
9050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9051 "2830 This device does not support "
9052 "Advanced Error Reporting (AER)\n");
9053 phba->cfg_aer_support = 0;
9054 }
9055 rc = 0;
9056 }
9057
9058 /*
9059 * The port is ready, set the host's link state to LINK_DOWN
9060 * in preparation for link interrupts.
9061 */
9062 spin_lock_irq(&phba->hbalock);
9063 phba->link_state = LPFC_LINK_DOWN;
9064
9065 /* Check if physical ports are trunked */
9066 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9067 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9068 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9069 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9070 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9071 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9072 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9073 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9074 spin_unlock_irq(&phba->hbalock);
9075
9076 /* Arm the CQs and then EQs on device */
9077 lpfc_sli4_arm_cqeq_intr(phba);
9078
9079 /* Indicate device interrupt mode */
9080 phba->sli4_hba.intr_enable = 1;
9081
9082 /* Setup CMF after HBA is initialized */
9083 lpfc_cmf_setup(phba);
9084
9085 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
9086 (phba->hba_flag & LINK_DISABLED)) {
9087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9088 "3103 Adapter Link is disabled.\n");
9089 lpfc_down_link(phba, mboxq);
9090 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9091 if (rc != MBX_SUCCESS) {
9092 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9093 "3104 Adapter failed to issue "
9094 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
9095 goto out_io_buff_free;
9096 }
9097 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9098 /* don't perform init_link on SLI4 FC port loopback test */
9099 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9100 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9101 if (rc)
9102 goto out_io_buff_free;
9103 }
9104 }
9105 mempool_free(mboxq, phba->mbox_mem_pool);
9106
9107 /* Enable RAS FW log support */
9108 lpfc_sli4_ras_setup(phba);
9109
9110 phba->hba_flag |= HBA_SETUP;
9111 return rc;
9112
9113 out_io_buff_free:
9114 /* Free allocated IO Buffers */
9115 lpfc_io_free(phba);
9116 out_unset_queue:
9117 /* Unset all the queues set up in this routine when error out */
9118 lpfc_sli4_queue_unset(phba);
9119 out_free_iocblist:
9120 lpfc_free_iocb_list(phba);
9121 out_destroy_queue:
9122 lpfc_sli4_queue_destroy(phba);
9123 out_stop_timers:
9124 lpfc_stop_hba_timers(phba);
9125 out_free_mbox:
9126 mempool_free(mboxq, phba->mbox_mem_pool);
9127 return rc;
9128 }
9129
9130 /**
9131 * lpfc_mbox_timeout - Timeout call back function for mbox timer
9132 * @t: Context to fetch pointer to hba structure from.
9133 *
9134 * This is the callback function for mailbox timer. The mailbox
9135 * timer is armed when a new mailbox command is issued and the timer
9136  * is deleted when the mailbox completes. The function is called by
9137 * the kernel timer code when a mailbox does not complete within
9138 * expected time. This function wakes up the worker thread to
9139 * process the mailbox timeout and returns. All the processing is
9140 * done by the worker thread function lpfc_mbox_timeout_handler.
9141 **/
9142 void
9143 lpfc_mbox_timeout(struct timer_list *t)
9144 {
9145 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
9146 unsigned long iflag;
9147 uint32_t tmo_posted;
9148
9149 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9150 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9151 if (!tmo_posted)
9152 phba->pport->work_port_events |= WORKER_MBOX_TMO;
9153 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9154
9155 if (!tmo_posted)
9156 lpfc_worker_wake_up(phba);
9157 return;
9158 }
9159
9160 /**
9161 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9162 * are pending
9163 * @phba: Pointer to HBA context object.
9164 *
9165 * This function checks if any mailbox completions are present on the mailbox
9166 * completion queue.
9167 **/
9168 static bool
9169 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9170 {
9171
9172 uint32_t idx;
9173 struct lpfc_queue *mcq;
9174 struct lpfc_mcqe *mcqe;
9175 bool pending_completions = false;
9176 uint8_t qe_valid;
9177
9178 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9179 return false;
9180
9181 /* Check for completions on mailbox completion queue */
9182
9183 mcq = phba->sli4_hba.mbx_cq;
9184 idx = mcq->hba_index;
9185 qe_valid = mcq->qe_valid;
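	/* A CQE is consumable while its valid bit matches the queue's current
	 * phase; on ports with CQ auto-valid (cqav) the phase flips each time
	 * the index wraps, which the loop below accounts for.
	 */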
9186 while (bf_get_le32(lpfc_cqe_valid,
9187 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9188 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9189 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9190 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9191 pending_completions = true;
9192 break;
9193 }
9194 idx = (idx + 1) % mcq->entry_count;
9195 if (mcq->hba_index == idx)
9196 break;
9197
9198 /* if the index wrapped around, toggle the valid bit */
9199 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9200 qe_valid = (qe_valid) ? 0 : 1;
9201 }
9202 return pending_completions;
9203
9204 }
9205
9206 /**
9207 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9208 * that were missed.
9209 * @phba: Pointer to HBA context object.
9210 *
9211 * For sli4, it is possible to miss an interrupt. As such mbox completions
9212  * may be missed, causing erroneous mailbox timeouts to occur. This function
9213 * checks to see if mbox completions are on the mailbox completion queue
9214 * and will process all the completions associated with the eq for the
9215 * mailbox completion queue.
9216 **/
9217 static bool
9218 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9219 {
9220 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9221 uint32_t eqidx;
9222 struct lpfc_queue *fpeq = NULL;
9223 struct lpfc_queue *eq;
9224 bool mbox_pending;
9225
9226 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9227 return false;
9228
9229 /* Find the EQ associated with the mbox CQ */
9230 if (sli4_hba->hdwq) {
9231 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9232 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9233 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9234 fpeq = eq;
9235 break;
9236 }
9237 }
9238 }
9239 if (!fpeq)
9240 return false;
9241
9242 /* Turn off interrupts from this EQ */
9243
9244 sli4_hba->sli4_eq_clr_intr(fpeq);
9245
9246 /* Check to see if a mbox completion is pending */
9247
9248 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9249
9250 /*
9251 * If a mbox completion is pending, process all the events on EQ
9252 * associated with the mbox completion queue (this could include
9253 * mailbox commands, async events, els commands, receive queue data
9254 * and fcp commands)
9255 */
9256
9257 if (mbox_pending)
9258 /* process and rearm the EQ */
9259 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
9260 else
9261 /* Always clear and re-arm the EQ */
9262 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9263
9264 return mbox_pending;
9265
9266 }
9267
9268 /**
9269 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9270 * @phba: Pointer to HBA context object.
9271 *
9272 * This function is called from worker thread when a mailbox command times out.
9273 * The caller is not required to hold any locks. This function will reset the
9274 * HBA and recover all the pending commands.
9275 **/
9276 void
9277 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9278 {
9279 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9280 MAILBOX_t *mb = NULL;
9281
9282 struct lpfc_sli *psli = &phba->sli;
9283
9284 /* If the mailbox completed, process the completion */
9285 lpfc_sli4_process_missed_mbox_completions(phba);
9286
9287 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9288 return;
9289
9290 if (pmbox != NULL)
9291 mb = &pmbox->u.mb;
9292 /* Check the pmbox pointer first. There is a race condition
9293 * between the mbox timeout handler getting executed in the
9294 * worklist and the mailbox actually completing. When this
9295 * race condition occurs, the mbox_active will be NULL.
9296 */
9297 spin_lock_irq(&phba->hbalock);
9298 if (pmbox == NULL) {
9299 lpfc_printf_log(phba, KERN_WARNING,
9300 LOG_MBOX | LOG_SLI,
9301 "0353 Active Mailbox cleared - mailbox timeout "
9302 "exiting\n");
9303 spin_unlock_irq(&phba->hbalock);
9304 return;
9305 }
9306
9307 /* Mbox cmd <mbxCommand> timeout */
9308 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9309 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9310 mb->mbxCommand,
9311 phba->pport->port_state,
9312 phba->sli.sli_flag,
9313 phba->sli.mbox_active);
9314 spin_unlock_irq(&phba->hbalock);
9315
9316 /* Setting state unknown so lpfc_sli_abort_iocb_ring
9317 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9318 * it to fail all outstanding SCSI IO.
9319 */
9320 spin_lock_irq(&phba->pport->work_port_lock);
9321 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9322 spin_unlock_irq(&phba->pport->work_port_lock);
9323 spin_lock_irq(&phba->hbalock);
9324 phba->link_state = LPFC_LINK_UNKNOWN;
9325 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9326 spin_unlock_irq(&phba->hbalock);
9327
9328 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9329 "0345 Resetting board due to mailbox timeout\n");
9330
9331 /* Reset the HBA device */
9332 lpfc_reset_hba(phba);
9333 }
9334
9335 /**
9336 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9337 * @phba: Pointer to HBA context object.
9338 * @pmbox: Pointer to mailbox object.
9339 * @flag: Flag indicating how the mailbox need to be processed.
9340 *
9341 * This function is called by discovery code and HBA management code
9342 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9343 * function gets the hbalock to protect the data structures.
9344 * The mailbox command can be submitted in polling mode, in which case
9345 * this function will wait in a polling loop for the completion of the
9346 * mailbox.
9347 * If the mailbox is submitted in no_wait mode (not polling) the
9348 * function will submit the command and returns immediately without waiting
9349 * for the mailbox completion. The no_wait is supported only when HBA
9350 * is in SLI2/SLI3 mode - interrupts are enabled.
9351 * The SLI interface allows only one mailbox pending at a time. If the
9352 * mailbox is issued in polling mode and there is already a mailbox
9353 * pending, then the function will return an error. If the mailbox is issued
9354 * in NO_WAIT mode and there is a mailbox pending already, the function
9355 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9356  * The SLI layer owns the mailbox object until the completion of the mailbox
9357  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9358 * return codes the caller owns the mailbox command after the return of
9359 * the function.
9360 **/
9361 static int
9362 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9363 uint32_t flag)
9364 {
9365 MAILBOX_t *mbx;
9366 struct lpfc_sli *psli = &phba->sli;
9367 uint32_t status, evtctr;
9368 uint32_t ha_copy, hc_copy;
9369 int i;
9370 unsigned long timeout;
9371 unsigned long drvr_flag = 0;
9372 uint32_t word0, ldata;
9373 void __iomem *to_slim;
9374 int processing_queue = 0;
9375
9376 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9377 if (!pmbox) {
9378 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9379 /* processing mbox queue from intr_handler */
9380 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9381 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9382 return MBX_SUCCESS;
9383 }
9384 processing_queue = 1;
9385 pmbox = lpfc_mbox_get(phba);
9386 if (!pmbox) {
9387 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9388 return MBX_SUCCESS;
9389 }
9390 }
9391
9392 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9393 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9394 		if (!pmbox->vport) {
9395 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9396 lpfc_printf_log(phba, KERN_ERR,
9397 LOG_MBOX | LOG_VPORT,
9398 "1806 Mbox x%x failed. No vport\n",
9399 pmbox->u.mb.mbxCommand);
9400 dump_stack();
9401 goto out_not_finished;
9402 }
9403 }
9404
9405 /* If the PCI channel is in offline state, do not post mbox. */
9406 if (unlikely(pci_channel_offline(phba->pcidev))) {
9407 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9408 goto out_not_finished;
9409 }
9410
9411 /* If HBA has a deferred error attention, fail the iocb. */
9412 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9413 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9414 goto out_not_finished;
9415 }
9416
9417 psli = &phba->sli;
9418
9419 mbx = &pmbox->u.mb;
9420 status = MBX_SUCCESS;
9421
9422 if (phba->link_state == LPFC_HBA_ERROR) {
9423 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9424
9425 /* Mbox command <mbxCommand> cannot issue */
9426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9427 "(%d):0311 Mailbox command x%x cannot "
9428 "issue Data: x%x x%x\n",
9429 pmbox->vport ? pmbox->vport->vpi : 0,
9430 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9431 goto out_not_finished;
9432 }
9433
9434 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9435 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9436 !(hc_copy & HC_MBINT_ENA)) {
9437 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9438 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9439 "(%d):2528 Mailbox command x%x cannot "
9440 "issue Data: x%x x%x\n",
9441 pmbox->vport ? pmbox->vport->vpi : 0,
9442 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9443 goto out_not_finished;
9444 }
9445 }
9446
9447 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9448 /* Polling for a mbox command when another one is already active
9449 * is not allowed in SLI. Also, the driver must have established
9450 * SLI2 mode to queue and process multiple mbox commands.
9451 */
9452
9453 if (flag & MBX_POLL) {
9454 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9455
9456 /* Mbox command <mbxCommand> cannot issue */
9457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9458 "(%d):2529 Mailbox command x%x "
9459 "cannot issue Data: x%x x%x\n",
9460 pmbox->vport ? pmbox->vport->vpi : 0,
9461 pmbox->u.mb.mbxCommand,
9462 psli->sli_flag, flag);
9463 goto out_not_finished;
9464 }
9465
9466 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9467 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9468 /* Mbox command <mbxCommand> cannot issue */
9469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9470 "(%d):2530 Mailbox command x%x "
9471 "cannot issue Data: x%x x%x\n",
9472 pmbox->vport ? pmbox->vport->vpi : 0,
9473 pmbox->u.mb.mbxCommand,
9474 psli->sli_flag, flag);
9475 goto out_not_finished;
9476 }
9477
9478 /* Another mailbox command is still being processed, queue this
9479 * command to be processed later.
9480 */
9481 lpfc_mbox_put(phba, pmbox);
9482
9483 /* Mbox cmd issue - BUSY */
9484 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9485 "(%d):0308 Mbox cmd issue - BUSY Data: "
9486 "x%x x%x x%x x%x\n",
9487 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9488 mbx->mbxCommand,
9489 phba->pport ? phba->pport->port_state : 0xff,
9490 psli->sli_flag, flag);
9491
9492 psli->slistat.mbox_busy++;
9493 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9494
9495 if (pmbox->vport) {
9496 lpfc_debugfs_disc_trc(pmbox->vport,
9497 LPFC_DISC_TRC_MBOX_VPORT,
9498 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9499 (uint32_t)mbx->mbxCommand,
9500 mbx->un.varWords[0], mbx->un.varWords[1]);
9501 }
9502 else {
9503 lpfc_debugfs_disc_trc(phba->pport,
9504 LPFC_DISC_TRC_MBOX,
9505 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9506 (uint32_t)mbx->mbxCommand,
9507 mbx->un.varWords[0], mbx->un.varWords[1]);
9508 }
9509
9510 return MBX_BUSY;
9511 }
9512
9513 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9514
9515 /* If we are not polling, we MUST be in SLI2 mode */
9516 if (flag != MBX_POLL) {
9517 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9518 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9519 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9520 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9521 /* Mbox command <mbxCommand> cannot issue */
9522 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9523 "(%d):2531 Mailbox command x%x "
9524 "cannot issue Data: x%x x%x\n",
9525 pmbox->vport ? pmbox->vport->vpi : 0,
9526 pmbox->u.mb.mbxCommand,
9527 psli->sli_flag, flag);
9528 goto out_not_finished;
9529 }
9530 		/* Arm the active mailbox command timeout timer */
9531 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9532 1000);
9533 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9534 }
9535
9536 /* Mailbox cmd <cmd> issue */
9537 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9538 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9539 "x%x\n",
9540 pmbox->vport ? pmbox->vport->vpi : 0,
9541 mbx->mbxCommand,
9542 phba->pport ? phba->pport->port_state : 0xff,
9543 psli->sli_flag, flag);
9544
9545 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9546 if (pmbox->vport) {
9547 lpfc_debugfs_disc_trc(pmbox->vport,
9548 LPFC_DISC_TRC_MBOX_VPORT,
9549 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9550 (uint32_t)mbx->mbxCommand,
9551 mbx->un.varWords[0], mbx->un.varWords[1]);
9552 }
9553 else {
9554 lpfc_debugfs_disc_trc(phba->pport,
9555 LPFC_DISC_TRC_MBOX,
9556 "MBOX Send: cmd:x%x mb:x%x x%x",
9557 (uint32_t)mbx->mbxCommand,
9558 mbx->un.varWords[0], mbx->un.varWords[1]);
9559 }
9560 }
9561
9562 psli->slistat.mbox_cmd++;
9563 evtctr = psli->slistat.mbox_event;
9564
9565 /* next set own bit for the adapter and copy over command word */
9566 mbx->mbxOwner = OWN_CHIP;
9567
9568 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9569 /* Populate mbox extension offset word. */
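		/* (The stored value is the byte offset from the start of the
		 * host mailbox to the extension area, written into the
		 * command's designated offset word.)
		 */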
9570 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9571 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9572 = (uint8_t *)phba->mbox_ext
9573 - (uint8_t *)phba->mbox;
9574 }
9575
9576 /* Copy the mailbox extension data */
9577 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9578 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9579 (uint8_t *)phba->mbox_ext,
9580 pmbox->in_ext_byte_len);
9581 }
9582 /* Copy command data to host SLIM area */
9583 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9584 } else {
9585 /* Populate mbox extension offset word. */
9586 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9587 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9588 = MAILBOX_HBA_EXT_OFFSET;
9589
9590 /* Copy the mailbox extension data */
9591 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9592 lpfc_memcpy_to_slim(phba->MBslimaddr +
9593 MAILBOX_HBA_EXT_OFFSET,
9594 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9595
9596 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9597 /* copy command data into host mbox for cmpl */
9598 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9599 MAILBOX_CMD_SIZE);
9600
9601 /* First copy mbox command data to HBA SLIM, skip past first
9602 word */
9603 to_slim = phba->MBslimaddr + sizeof (uint32_t);
9604 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9605 MAILBOX_CMD_SIZE - sizeof (uint32_t));
9606
9607 /* Next copy over first word, with mbxOwner set */
9608 ldata = *((uint32_t *)mbx);
9609 to_slim = phba->MBslimaddr;
9610 writel(ldata, to_slim);
9611 readl(to_slim); /* flush */
9612
9613 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9614 /* switch over to host mailbox */
9615 psli->sli_flag |= LPFC_SLI_ACTIVE;
9616 }
9617
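	/* Ensure the mailbox image is fully written before ringing the chip
	 * attention doorbell below.
	 */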
9618 wmb();
9619
9620 switch (flag) {
9621 case MBX_NOWAIT:
9622 /* Set up reference to mailbox command */
9623 psli->mbox_active = pmbox;
9624 /* Interrupt board to do it */
9625 writel(CA_MBATT, phba->CAregaddr);
9626 readl(phba->CAregaddr); /* flush */
9627 /* Don't wait for it to finish, just return */
9628 break;
9629
9630 case MBX_POLL:
9631 /* Set up null reference to mailbox command */
9632 psli->mbox_active = NULL;
9633 /* Interrupt board to do it */
9634 writel(CA_MBATT, phba->CAregaddr);
9635 readl(phba->CAregaddr); /* flush */
9636
9637 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9638 /* First read mbox status word */
9639 word0 = *((uint32_t *)phba->mbox);
9640 word0 = le32_to_cpu(word0);
9641 } else {
9642 /* First read mbox status word */
9643 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9644 spin_unlock_irqrestore(&phba->hbalock,
9645 drvr_flag);
9646 goto out_not_finished;
9647 }
9648 }
9649
9650 /* Read the HBA Host Attention Register */
9651 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9652 spin_unlock_irqrestore(&phba->hbalock,
9653 drvr_flag);
9654 goto out_not_finished;
9655 }
9656 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9657 1000) + jiffies;
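		/* lpfc_mbox_tmo_val() is in seconds; the above computes an
		 * absolute jiffies deadline for the polling loop below.
		 */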
9658 i = 0;
9659 /* Wait for command to complete */
9660 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9661 (!(ha_copy & HA_MBATT) &&
9662 (phba->link_state > LPFC_WARM_START))) {
9663 if (time_after(jiffies, timeout)) {
9664 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9665 spin_unlock_irqrestore(&phba->hbalock,
9666 drvr_flag);
9667 goto out_not_finished;
9668 }
9669
9670 /* Check if we took a mbox interrupt while we were
9671 polling */
9672 if (((word0 & OWN_CHIP) != OWN_CHIP)
9673 && (evtctr != psli->slistat.mbox_event))
9674 break;
9675
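			/* Busy-wait the first ~10 iterations, then back off
			 * with 1ms sleeps, dropping hbalock across each sleep.
			 */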
9676 if (i++ > 10) {
9677 spin_unlock_irqrestore(&phba->hbalock,
9678 drvr_flag);
9679 msleep(1);
9680 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9681 }
9682
9683 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9684 /* First copy command data */
9685 word0 = *((uint32_t *)phba->mbox);
9686 word0 = le32_to_cpu(word0);
9687 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9688 MAILBOX_t *slimmb;
9689 uint32_t slimword0;
9690 /* Check real SLIM for any errors */
9691 slimword0 = readl(phba->MBslimaddr);
9692 				slimmb = (MAILBOX_t *)&slimword0;
9693 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9694 && slimmb->mbxStatus) {
9695 psli->sli_flag &=
9696 ~LPFC_SLI_ACTIVE;
9697 word0 = slimword0;
9698 }
9699 }
9700 } else {
9701 /* First copy command data */
9702 word0 = readl(phba->MBslimaddr);
9703 }
9704 /* Read the HBA Host Attention Register */
9705 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9706 spin_unlock_irqrestore(&phba->hbalock,
9707 drvr_flag);
9708 goto out_not_finished;
9709 }
9710 }
9711
9712 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9713 /* copy results back to user */
9714 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9715 MAILBOX_CMD_SIZE);
9716 /* Copy the mailbox extension data */
9717 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9718 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9719 pmbox->ctx_buf,
9720 pmbox->out_ext_byte_len);
9721 }
9722 } else {
9723 /* First copy command data */
9724 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9725 MAILBOX_CMD_SIZE);
9726 /* Copy the mailbox extension data */
9727 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9728 lpfc_memcpy_from_slim(
9729 pmbox->ctx_buf,
9730 phba->MBslimaddr +
9731 MAILBOX_HBA_EXT_OFFSET,
9732 pmbox->out_ext_byte_len);
9733 }
9734 }
9735
9736 writel(HA_MBATT, phba->HAregaddr);
9737 readl(phba->HAregaddr); /* flush */
9738
9739 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9740 status = mbx->mbxStatus;
9741 }
9742
9743 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9744 return status;
9745
9746 out_not_finished:
9747 if (processing_queue) {
9748 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9749 lpfc_mbox_cmpl_put(phba, pmbox);
9750 }
9751 return MBX_NOT_FINISHED;
9752 }
9753
9754 /**
9755 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9756 * @phba: Pointer to HBA context object.
9757 *
9758 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9759  * the driver internal pending mailbox queue. It will then try to wait out
9760  * any outstanding mailbox command before returning.
9761 *
9762 * Returns:
9763  * 0 - the outstanding mailbox command completed.
9764  * 1 - the wait for the outstanding mailbox command timed out.
9765 **/
9766 static int
9767 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9768 {
9769 struct lpfc_sli *psli = &phba->sli;
9770 LPFC_MBOXQ_t *mboxq;
9771 int rc = 0;
9772 unsigned long timeout = 0;
9773 u32 sli_flag;
9774 u8 cmd, subsys, opcode;
9775
9776 /* Mark the asynchronous mailbox command posting as blocked */
9777 spin_lock_irq(&phba->hbalock);
9778 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9779 /* Determine how long we might wait for the active mailbox
9780 * command to be gracefully completed by firmware.
9781 */
9782 if (phba->sli.mbox_active)
9783 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9784 phba->sli.mbox_active) *
9785 1000) + jiffies;
9786 spin_unlock_irq(&phba->hbalock);
9787
9788 /* Make sure the mailbox is really active */
9789 if (timeout)
9790 lpfc_sli4_process_missed_mbox_completions(phba);
9791
9792 /* Wait for the outstanding mailbox command to complete */
9793 while (phba->sli.mbox_active) {
9794 /* Check active mailbox complete status every 2ms */
9795 msleep(2);
9796 if (time_after(jiffies, timeout)) {
9797 /* Timeout, mark the outstanding cmd not complete */
9798
9799 			/* Sanity check: sli.mbox_active may have completed or
9800 			 * been cancelled from another context during the last
9801 			 * 2ms sleep, so take hbalock to be sure before logging.
9802 			 */
9803 spin_lock_irq(&phba->hbalock);
9804 if (phba->sli.mbox_active) {
9805 mboxq = phba->sli.mbox_active;
9806 cmd = mboxq->u.mb.mbxCommand;
9807 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9808 mboxq);
9809 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9810 mboxq);
9811 sli_flag = psli->sli_flag;
9812 spin_unlock_irq(&phba->hbalock);
9813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9814 "2352 Mailbox command x%x "
9815 "(x%x/x%x) sli_flag x%x could "
9816 "not complete\n",
9817 cmd, subsys, opcode,
9818 sli_flag);
9819 } else {
9820 spin_unlock_irq(&phba->hbalock);
9821 }
9822
9823 rc = 1;
9824 break;
9825 }
9826 }
9827
9828 	/* Cannot cleanly block async mailbox commands; fail the request */
9829 if (rc) {
9830 spin_lock_irq(&phba->hbalock);
9831 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9832 spin_unlock_irq(&phba->hbalock);
9833 }
9834 return rc;
9835 }
9836
9837 /**
9838  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9839 * @phba: Pointer to HBA context object.
9840 *
9841  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9842  * commands from the driver internal pending mailbox queue. It makes sure
9843  * that there is no outstanding mailbox command before resuming posting
9844  * asynchronous mailbox commands. If, for any reason, there is an
9845  * outstanding mailbox command, it will try to wait it out before resuming
9846  * asynchronous mailbox command posting.
9847 **/
9848 static void
9849 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9850 {
9851 struct lpfc_sli *psli = &phba->sli;
9852
9853 spin_lock_irq(&phba->hbalock);
9854 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9855 /* Asynchronous mailbox posting is not blocked, do nothing */
9856 spin_unlock_irq(&phba->hbalock);
9857 return;
9858 }
9859
9860 	/* An outstanding synchronous mailbox command is guaranteed to be done,
9861 	 * either successfully or by timeout, and after a timeout the
9862 	 * outstanding command is always removed. So just unblock async
9863 	 * mailbox command posting and resume.
9864 	 */
9865 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9866 spin_unlock_irq(&phba->hbalock);
9867
9868 /* wake up worker thread to post asynchronous mailbox command */
9869 lpfc_worker_wake_up(phba);
9870 }
9871
9872 /**
9873 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9874 * @phba: Pointer to HBA context object.
9875 * @mboxq: Pointer to mailbox object.
9876 *
9877  * The function polls the port's bootstrap mailbox register ready bit for
9878  * up to the regular mailbox command timeout value. Returns:
9879  *
9880  * 0 - no timeout waiting for the bootstrap mailbox register ready bit.
9881  * MBXERR_ERROR - the wait for the bootstrap mailbox register timed out.
9882 **/
9883 static int
9884 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9885 {
9886 uint32_t db_ready;
9887 unsigned long timeout;
9888 struct lpfc_register bmbx_reg;
9889
9890 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9891 * 1000) + jiffies;
9892
9893 do {
9894 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9895 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
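		/* Note: mdelay() rather than msleep(), as this routine may be
		 * reached from a context that cannot sleep.
		 */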
9896 if (!db_ready)
9897 mdelay(2);
9898
9899 if (time_after(jiffies, timeout))
9900 return MBXERR_ERROR;
9901 } while (!db_ready);
9902
9903 return 0;
9904 }
9905
9906 /**
9907 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9908 * @phba: Pointer to HBA context object.
9909 * @mboxq: Pointer to mailbox object.
9910 *
9911 * The function posts a mailbox to the port. The mailbox is expected
9912  * to be completely filled in and ready for the port to operate on it.
9913 * This routine executes a synchronous completion operation on the
9914 * mailbox by polling for its completion.
9915 *
9916 * The caller must not be holding any locks when calling this routine.
9917 *
9918 * Returns:
9919 * MBX_SUCCESS - mailbox posted successfully
9920 * Any of the MBX error values.
9921 **/
9922 static int
9923 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9924 {
9925 int rc = MBX_SUCCESS;
9926 unsigned long iflag;
9927 uint32_t mcqe_status;
9928 uint32_t mbx_cmnd;
9929 struct lpfc_sli *psli = &phba->sli;
9930 struct lpfc_mqe *mb = &mboxq->u.mqe;
9931 struct lpfc_bmbx_create *mbox_rgn;
9932 struct dma_address *dma_address;
9933
9934 /*
9935 * Only one mailbox can be active to the bootstrap mailbox region
9936 * at a time and there is no queueing provided.
9937 */
9938 spin_lock_irqsave(&phba->hbalock, iflag);
9939 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9940 spin_unlock_irqrestore(&phba->hbalock, iflag);
9941 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9942 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9943 "cannot issue Data: x%x x%x\n",
9944 mboxq->vport ? mboxq->vport->vpi : 0,
9945 mboxq->u.mb.mbxCommand,
9946 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9947 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9948 psli->sli_flag, MBX_POLL);
9949 return MBXERR_ERROR;
9950 }
9951 /* The server grabs the token and owns it until release */
9952 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9953 phba->sli.mbox_active = mboxq;
9954 spin_unlock_irqrestore(&phba->hbalock, iflag);
9955
9956 	/* Wait for bootstrap mbox register readiness */
9957 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9958 if (rc)
9959 goto exit;
9960 /*
9961 * Initialize the bootstrap memory region to avoid stale data areas
9962 * in the mailbox post. Then copy the caller's mailbox contents to
9963 * the bmbx mailbox region.
9964 */
9965 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9966 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9967 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9968 sizeof(struct lpfc_mqe));
9969
9970 /* Post the high mailbox dma address to the port and wait for ready. */
9971 dma_address = &phba->sli4_hba.bmbx.dma_address;
9972 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9973
9974 	/* Wait for bmbx register ready after the hi-address write */
9975 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9976 if (rc)
9977 goto exit;
9978
9979 /* Post the low mailbox dma address to the port. */
9980 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
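	/* The low-address write completes the two-part address handshake;
	 * the port can then fetch and execute the mailbox from the bmbx
	 * region.
	 */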
9981
9982 	/* Wait for bmbx register ready after the low-address write */
9983 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9984 if (rc)
9985 goto exit;
9986
9987 /*
9988 * Read the CQ to ensure the mailbox has completed.
9989 * If so, update the mailbox status so that the upper layers
9990 * can complete the request normally.
9991 */
9992 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9993 sizeof(struct lpfc_mqe));
9994 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9995 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9996 sizeof(struct lpfc_mcqe));
9997 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9998 /*
9999 * When the CQE status indicates a failure and the mailbox status
10000 * indicates success then copy the CQE status into the mailbox status
10001 * (and prefix it with x4000).
10002 */
10003 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
10004 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
10005 bf_set(lpfc_mqe_status, mb,
10006 (LPFC_MBX_ERROR_RANGE | mcqe_status));
10007 rc = MBXERR_ERROR;
10008 } else
10009 lpfc_sli4_swap_str(phba, mboxq);
10010
10011 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10012 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
10013 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
10014 " x%x x%x CQ: x%x x%x x%x x%x\n",
10015 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10016 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10017 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10018 bf_get(lpfc_mqe_status, mb),
10019 mb->un.mb_words[0], mb->un.mb_words[1],
10020 mb->un.mb_words[2], mb->un.mb_words[3],
10021 mb->un.mb_words[4], mb->un.mb_words[5],
10022 mb->un.mb_words[6], mb->un.mb_words[7],
10023 mb->un.mb_words[8], mb->un.mb_words[9],
10024 mb->un.mb_words[10], mb->un.mb_words[11],
10025 mb->un.mb_words[12], mboxq->mcqe.word0,
10026 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
10027 mboxq->mcqe.trailer);
10028 exit:
10029 	/* We are holding the token; release it under hbalock */
10030 spin_lock_irqsave(&phba->hbalock, iflag);
10031 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10032 phba->sli.mbox_active = NULL;
10033 spin_unlock_irqrestore(&phba->hbalock, iflag);
10034 return rc;
10035 }
10036
10037 /**
10038 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10039 * @phba: Pointer to HBA context object.
10040 * @mboxq: Pointer to mailbox object.
10041 * @flag: Flag indicating how the mailbox need to be processed.
10042 *
10043 * This function is called by discovery code and HBA management code to submit
10044 * a mailbox command to firmware with SLI-4 interface spec.
10045 *
10046 * Return codes the caller owns the mailbox command after the return of the
10047 * function.
10048 **/
10049 static int
10050 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10051 uint32_t flag)
10052 {
10053 struct lpfc_sli *psli = &phba->sli;
10054 unsigned long iflags;
10055 int rc;
10056
10057 /* Dump the mailbox command being issued, if the idiag dump is set up */
10058 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10059
10060 rc = lpfc_mbox_dev_check(phba);
10061 if (unlikely(rc)) {
10062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10063 "(%d):2544 Mailbox command x%x (x%x/x%x) "
10064 "cannot issue Data: x%x x%x\n",
10065 mboxq->vport ? mboxq->vport->vpi : 0,
10066 mboxq->u.mb.mbxCommand,
10067 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10068 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10069 psli->sli_flag, flag);
10070 goto out_not_finished;
10071 }
10072
10073 /* Detect polling mode and jump to a handler */
10074 if (!phba->sli4_hba.intr_enable) {
10075 if (flag == MBX_POLL)
10076 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10077 else
10078 rc = -EIO;
10079 if (rc != MBX_SUCCESS)
10080 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10081 "(%d):2541 Mailbox command x%x "
10082 "(x%x/x%x) failure: "
10083 "mqe_sta: x%x mcqe_sta: x%x/x%x "
10084 "Data: x%x x%x\n",
10085 mboxq->vport ? mboxq->vport->vpi : 0,
10086 mboxq->u.mb.mbxCommand,
10087 lpfc_sli_config_mbox_subsys_get(phba,
10088 mboxq),
10089 lpfc_sli_config_mbox_opcode_get(phba,
10090 mboxq),
10091 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10092 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10093 bf_get(lpfc_mcqe_ext_status,
10094 &mboxq->mcqe),
10095 psli->sli_flag, flag);
10096 return rc;
10097 } else if (flag == MBX_POLL) {
10098 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10099 "(%d):2542 Try to issue mailbox command "
10100 "x%x (x%x/x%x) synchronously ahead of async "
10101 "mailbox command queue: x%x x%x\n",
10102 mboxq->vport ? mboxq->vport->vpi : 0,
10103 mboxq->u.mb.mbxCommand,
10104 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10105 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10106 psli->sli_flag, flag);
10107 /* Try to block the asynchronous mailbox posting */
10108 rc = lpfc_sli4_async_mbox_block(phba);
10109 if (!rc) {
10110 /* Successfully blocked, now issue sync mbox cmd */
10111 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10112 if (rc != MBX_SUCCESS)
10113 lpfc_printf_log(phba, KERN_WARNING,
10114 LOG_MBOX | LOG_SLI,
10115 "(%d):2597 Sync Mailbox command "
10116 "x%x (x%x/x%x) failure: "
10117 "mqe_sta: x%x mcqe_sta: x%x/x%x "
10118 "Data: x%x x%x\n",
10119 mboxq->vport ? mboxq->vport->vpi : 0,
10120 mboxq->u.mb.mbxCommand,
10121 lpfc_sli_config_mbox_subsys_get(phba,
10122 mboxq),
10123 lpfc_sli_config_mbox_opcode_get(phba,
10124 mboxq),
10125 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10126 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10127 bf_get(lpfc_mcqe_ext_status,
10128 &mboxq->mcqe),
10129 psli->sli_flag, flag);
10130 /* Unblock the async mailbox posting afterward */
10131 lpfc_sli4_async_mbox_unblock(phba);
10132 }
10133 return rc;
10134 }
10135
10136 /* Interrupt mode: process as an asynchronous mailbox command */
10137 rc = lpfc_mbox_cmd_check(phba, mboxq);
10138 if (rc) {
10139 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10140 "(%d):2543 Mailbox command x%x (x%x/x%x) "
10141 "cannot issue Data: x%x x%x\n",
10142 mboxq->vport ? mboxq->vport->vpi : 0,
10143 mboxq->u.mb.mbxCommand,
10144 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10145 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10146 psli->sli_flag, flag);
10147 goto out_not_finished;
10148 }
10149
10150 /* Put the mailbox command into the driver's internal FIFO */
10151 psli->slistat.mbox_busy++;
10152 spin_lock_irqsave(&phba->hbalock, iflags);
10153 lpfc_mbox_put(phba, mboxq);
10154 spin_unlock_irqrestore(&phba->hbalock, iflags);
10155 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10156 "(%d):0354 Mbox cmd issue - Enqueue Data: "
10157 "x%x (x%x/x%x) x%x x%x x%x\n",
10158 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10159 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10160 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10161 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10162 phba->pport->port_state,
10163 psli->sli_flag, MBX_NOWAIT);
10164 /* Wake up worker thread to transport mailbox command from head */
10165 lpfc_worker_wake_up(phba);
10166
10167 return MBX_BUSY;
10168
10169 out_not_finished:
10170 return MBX_NOT_FINISHED;
10171 }
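
/*
 * Illustrative sketch only, not called by the driver: issuing a mailbox
 * command in polling mode through the jump-table wrapper. With interrupts
 * disabled, lpfc_sli_issue_mbox_s4() falls through to the synchronous
 * bootstrap path handled by lpfc_sli4_post_sync_mbox() above. Error
 * handling is abbreviated.
 */
static int __maybe_unused lpfc_example_read_rev_poll(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_read_rev(phba, mboxq);	/* build the READ_REV MQE */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	/* The caller owns the mailbox after return, so always free it. */
	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}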
10172
10173 /**
10174 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
10175 * @phba: Pointer to HBA context object.
10176 *
10177 * This function is called by the worker thread to send a mailbox command to
10178 * the SLI4 HBA firmware.
10179 *
10180 **/
10181 int
10182 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
10183 {
10184 struct lpfc_sli *psli = &phba->sli;
10185 LPFC_MBOXQ_t *mboxq;
10186 int rc = MBX_SUCCESS;
10187 unsigned long iflags;
10188 struct lpfc_mqe *mqe;
10189 uint32_t mbx_cmnd;
10190
10191 /* Check interrupt mode before posting the async mailbox command */
10192 if (unlikely(!phba->sli4_hba.intr_enable))
10193 return MBX_NOT_FINISHED;
10194
10195 /* Check for mailbox command service token */
10196 spin_lock_irqsave(&phba->hbalock, iflags);
10197 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
10198 spin_unlock_irqrestore(&phba->hbalock, iflags);
10199 return MBX_NOT_FINISHED;
10200 }
10201 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10202 spin_unlock_irqrestore(&phba->hbalock, iflags);
10203 return MBX_NOT_FINISHED;
10204 }
10205 if (unlikely(phba->sli.mbox_active)) {
10206 spin_unlock_irqrestore(&phba->hbalock, iflags);
10207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10208 "0384 There is pending active mailbox cmd\n");
10209 return MBX_NOT_FINISHED;
10210 }
10211 /* Take the mailbox command service token */
10212 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
10213
10214 /* Get the next mailbox command from head of queue */
10215 mboxq = lpfc_mbox_get(phba);
10216
10217 /* If no mailbox command is waiting to be posted, we're done */
10218 if (!mboxq) {
10219 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10220 spin_unlock_irqrestore(&phba->hbalock, iflags);
10221 return MBX_SUCCESS;
10222 }
10223 phba->sli.mbox_active = mboxq;
10224 spin_unlock_irqrestore(&phba->hbalock, iflags);
10225
10226 /* Check device readiness for posting mailbox command */
10227 rc = lpfc_mbox_dev_check(phba);
10228 if (unlikely(rc))
10229 /* The driver cleanup routine will clean up the pending mailbox */
10230 goto out_not_finished;
10231
10232 /* Prepare the mbox command to be posted */
10233 mqe = &mboxq->u.mqe;
10234 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
10235
10236 /* Start timer for the mbox_tmo and log some mailbox post messages */
10237 mod_timer(&psli->mbox_tmo, (jiffies +
10238 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
10239
10240 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10241 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
10242 "x%x x%x\n",
10243 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10244 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10245 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10246 phba->pport->port_state, psli->sli_flag);
10247
10248 if (mbx_cmnd != MBX_HEARTBEAT) {
10249 if (mboxq->vport) {
10250 lpfc_debugfs_disc_trc(mboxq->vport,
10251 LPFC_DISC_TRC_MBOX_VPORT,
10252 "MBOX Send vport: cmd:x%x mb:x%x x%x",
10253 mbx_cmnd, mqe->un.mb_words[0],
10254 mqe->un.mb_words[1]);
10255 } else {
10256 lpfc_debugfs_disc_trc(phba->pport,
10257 LPFC_DISC_TRC_MBOX,
10258 "MBOX Send: cmd:x%x mb:x%x x%x",
10259 mbx_cmnd, mqe->un.mb_words[0],
10260 mqe->un.mb_words[1]);
10261 }
10262 }
10263 psli->slistat.mbox_cmd++;
10264
10265 /* Post the mailbox command to the port */
10266 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10267 if (rc != MBX_SUCCESS) {
10268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10269 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10270 "cannot issue Data: x%x x%x\n",
10271 mboxq->vport ? mboxq->vport->vpi : 0,
10272 mboxq->u.mb.mbxCommand,
10273 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10274 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10275 psli->sli_flag, MBX_NOWAIT);
10276 goto out_not_finished;
10277 }
10278
10279 return rc;
10280
10281 out_not_finished:
10282 spin_lock_irqsave(&phba->hbalock, iflags);
10283 if (phba->sli.mbox_active) {
10284 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10285 __lpfc_mbox_cmpl_put(phba, mboxq);
10286 /* Release the token */
10287 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10288 phba->sli.mbox_active = NULL;
10289 }
10290 spin_unlock_irqrestore(&phba->hbalock, iflags);
10291
10292 return MBX_NOT_FINISHED;
10293 }
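
/*
 * Illustrative sketch only: queueing an asynchronous mailbox command.
 * lpfc_sli_issue_mbox() returns MBX_BUSY once the command is enqueued;
 * the worker thread later posts it through lpfc_sli4_post_async_mbox()
 * above and the mbox_cmpl callback runs when the completion arrives.
 */
static void __maybe_unused lpfc_example_mbox_done(struct lpfc_hba *phba,
						  LPFC_MBOXQ_t *pmb)
{
	mempool_free(pmb, phba->mbox_mem_pool);
}

static int __maybe_unused lpfc_example_heartbeat_async(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, mboxq);	/* build the HEARTBEAT MQE */
	mboxq->mbox_cmpl = lpfc_example_mbox_done;

	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;	/* MBX_BUSY: enqueued; the callback frees it */
}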
10294
10295 /**
10296 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10297 * @phba: Pointer to HBA context object.
10298 * @pmbox: Pointer to mailbox object.
10299 * @flag: Flag indicating how the mailbox needs to be processed.
10300 *
10301 * This routine dispatches to the SLI3 or SLI4 mailbox issuing routine
10302 * through the API jump table function pointer in the lpfc_hba struct.
10303 *
10304 * Return codes: the caller owns the mailbox command after the function
10305 * returns.
10306 **/
10307 int
10308 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10309 {
10310 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10311 }
10312
10313 /**
10314 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10315 * @phba: The hba struct for which this call is being executed.
10316 * @dev_grp: The HBA PCI-Device group number.
10317 *
10318 * This routine sets up the mbox interface API function jump table in @phba
10319 * struct.
10320 * Returns: 0 - success, -ENODEV - failure.
10321 **/
10322 int
10323 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10324 {
10325
10326 switch (dev_grp) {
10327 case LPFC_PCI_DEV_LP:
10328 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10329 phba->lpfc_sli_handle_slow_ring_event =
10330 lpfc_sli_handle_slow_ring_event_s3;
10331 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10332 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10333 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10334 break;
10335 case LPFC_PCI_DEV_OC:
10336 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10337 phba->lpfc_sli_handle_slow_ring_event =
10338 lpfc_sli_handle_slow_ring_event_s4;
10339 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10340 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10341 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10342 break;
10343 default:
10344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10345 "1420 Invalid HBA PCI-device group: 0x%x\n",
10346 dev_grp);
10347 return -ENODEV;
10348 }
10349 return 0;
10350 }
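
/*
 * Note: LPFC_PCI_DEV_LP covers the legacy LightPulse (SLI-3) families and
 * LPFC_PCI_DEV_OC the SLI-4 families; after this setup, callers dispatch
 * through the lpfc_sli_issue_mbox() wrapper rather than naming an _s3/_s4
 * routine directly.
 */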
10351
10352 /**
10353 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10354 * @phba: Pointer to HBA context object.
10355 * @pring: Pointer to driver SLI ring object.
10356 * @piocb: Pointer to address of newly added command iocb.
10357 *
10358 * This function is called with the hbalock held for SLI3 ports or
10359 * the ring lock held for SLI4 ports to add a command
10360 * iocb to the txq when the SLI layer cannot submit the command iocb
10361 * to the ring.
10362 **/
10363 void
10364 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10365 struct lpfc_iocbq *piocb)
10366 {
10367 if (phba->sli_rev == LPFC_SLI_REV4)
10368 lockdep_assert_held(&pring->ring_lock);
10369 else
10370 lockdep_assert_held(&phba->hbalock);
10371 /* Insert the caller's iocb in the txq tail for later processing. */
10372 list_add_tail(&piocb->list, &pring->txq);
10373 }
10374
10375 /**
10376 * lpfc_sli_next_iocb - Get the next iocb in the txq
10377 * @phba: Pointer to HBA context object.
10378 * @pring: Pointer to driver SLI ring object.
10379 * @piocb: Pointer to address of newly added command iocb.
10380 *
10381 * This function is called with hbalock held before a new
10382 * iocb is submitted to the firmware. This function drains
10383 * the txq so that iocbs already queued there are flushed to
10384 * the firmware before any new iocbs are submitted.
10385 * If there are iocbs in the txq which need to be submitted,
10386 * lpfc_sli_next_iocb dequeues the first element of the txq
10387 * and returns it.
10388 * If the txq is empty, the function returns *piocb and sets
10389 * *piocb to NULL. The caller checks *piocb to find out
10390 * whether there are more commands in the txq.
10391 **/
10392 static struct lpfc_iocbq *
10393 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10394 struct lpfc_iocbq **piocb)
10395 {
10396 struct lpfc_iocbq *nextiocb;
10397
10398 lockdep_assert_held(&phba->hbalock);
10399
10400 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10401 if (!nextiocb) {
10402 nextiocb = *piocb;
10403 *piocb = NULL;
10404 }
10405
10406 return nextiocb;
10407 }
10408
10409 /**
10410 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10411 * @phba: Pointer to HBA context object.
10412 * @ring_number: SLI ring number to issue iocb on.
10413 * @piocb: Pointer to command iocb.
10414 * @flag: Flag indicating if this command can be put into txq.
10415 *
10416 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10417 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10418 * recovering from an error state, if the HBA is resetting, or if the
10419 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
10420 * When the link is down, this function allows only iocbs for posting buffers.
10421 * This function finds the next available slot in the command ring, posts the
10422 * command to that slot, and writes the port attention register to request
10423 * that the HBA start processing the new iocb. If no slot is available in the
10424 * ring and SLI_IOCB_RET_IOCB is not set in @flag, the new iocb is added to
10425 * the txq; otherwise the function returns IOCB_BUSY.
10426 *
10427 * This function is called with hbalock held. The function will return
10428 * success after it successfully submits the iocb to firmware or after
10429 * adding it to the txq.
10430 **/
10431 static int
10432 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10433 struct lpfc_iocbq *piocb, uint32_t flag)
10434 {
10435 struct lpfc_iocbq *nextiocb;
10436 IOCB_t *iocb;
10437 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10438
10439 lockdep_assert_held(&phba->hbalock);
10440
10441 if (piocb->cmd_cmpl && (!piocb->vport) &&
10442 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10443 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10444 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10445 "1807 IOCB x%x failed. No vport\n",
10446 piocb->iocb.ulpCommand);
10447 dump_stack();
10448 return IOCB_ERROR;
10449 }
10450
10451
10452 /* If the PCI channel is in offline state, do not post iocbs. */
10453 if (unlikely(pci_channel_offline(phba->pcidev)))
10454 return IOCB_ERROR;
10455
10456 /* If HBA has a deferred error attention, fail the iocb. */
10457 if (unlikely(phba->hba_flag & DEFER_ERATT))
10458 return IOCB_ERROR;
10459
10460 /*
10461 * We should never get an IOCB if we are in a < LINK_DOWN state
10462 */
10463 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10464 return IOCB_ERROR;
10465
10466 /*
10467 * Check to see if we are blocking IOCB processing because of an
10468 * outstanding event.
10469 */
10470 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10471 goto iocb_busy;
10472
10473 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10474 /*
10475 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10476 * can be issued if the link is not up.
10477 */
10478 switch (piocb->iocb.ulpCommand) {
10479 case CMD_QUE_RING_BUF_CN:
10480 case CMD_QUE_RING_BUF64_CN:
10481 /*
10482 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10483 * completion, cmd_cmpl MUST be 0.
10484 */
10485 if (piocb->cmd_cmpl)
10486 piocb->cmd_cmpl = NULL;
10487 fallthrough;
10488 case CMD_CREATE_XRI_CR:
10489 case CMD_CLOSE_XRI_CN:
10490 case CMD_CLOSE_XRI_CX:
10491 break;
10492 default:
10493 goto iocb_busy;
10494 }
10495
10496 /*
10497 * For FCP commands, we must be in a state where we can process link
10498 * attention events.
10499 */
10500 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10501 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10502 goto iocb_busy;
10503 }
10504
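	/*
	 * Drain loop: pair each free ring slot with the next queued iocb
	 * from the txq (or with the caller's iocb once the txq is empty),
	 * so previously deferred commands are submitted first.
	 */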
10505 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10506 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10507 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10508
10509 if (iocb)
10510 lpfc_sli_update_ring(phba, pring);
10511 else
10512 lpfc_sli_update_full_ring(phba, pring);
10513
10514 if (!piocb)
10515 return IOCB_SUCCESS;
10516
10517 goto out_busy;
10518
10519 iocb_busy:
10520 pring->stats.iocb_cmd_delay++;
10521
10522 out_busy:
10523
10524 if (!(flag & SLI_IOCB_RET_IOCB)) {
10525 __lpfc_sli_ringtx_put(phba, pring, piocb);
10526 return IOCB_SUCCESS;
10527 }
10528
10529 return IOCB_BUSY;
10530 }
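
/*
 * Note on the busy path above: when no ring slot is free, the iocb is
 * parked on the txq and IOCB_SUCCESS is returned, unless the caller passed
 * SLI_IOCB_RET_IOCB, in which case it gets IOCB_BUSY and keeps ownership.
 */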
10531
10532 /**
10533 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10534 * @phba: Pointer to HBA context object.
10535 * @ring_number: SLI ring number to issue wqe on.
10536 * @piocb: Pointer to command iocb.
10537 * @flag: Flag indicating if this command can be put into txq.
10538 *
10539 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10540 * function to send an iocb command to an HBA with SLI-3 interface spec.
10541 *
10542 * This function takes the hbalock before invoking the lockless version.
10543 * The function will return success after it successfully submits the iocb to
10544 * firmware or after adding it to the txq.
10545 **/
10546 static int
10547 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10548 struct lpfc_iocbq *piocb, uint32_t flag)
10549 {
10550 unsigned long iflags;
10551 int rc;
10552
10553 spin_lock_irqsave(&phba->hbalock, iflags);
10554 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10555 spin_unlock_irqrestore(&phba->hbalock, iflags);
10556
10557 return rc;
10558 }
10559
10560 /**
10561 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10562 * @phba: Pointer to HBA context object.
10563 * @ring_number: SLI ring number to issue wqe on.
10564 * @piocb: Pointer to command iocb.
10565 * @flag: Flag indicating if this command can be put into txq.
10566 *
10567 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10568 * a WQE command to an HBA with SLI-4 interface spec.
10569 *
10570 * This function is a lockless version. The function will return success
10571 * after it successfully submits the WQE to firmware or after adding it to
10572 * the txq.
10573 **/
10574 static int
10575 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10576 struct lpfc_iocbq *piocb, uint32_t flag)
10577 {
10578 struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10579
10580 lpfc_prep_embed_io(phba, lpfc_cmd);
10581 return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10582 }
10583
10584 void
10585 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10586 {
10587 struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10588 union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10589 struct sli4_sge *sgl;
10590
10591 /* 128 byte wqe support here */
10592 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10593
10594 if (phba->fcp_embed_io) {
10595 struct fcp_cmnd *fcp_cmnd;
10596 u32 *ptr;
10597
10598 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10599
10600 /* Word 0-2 - FCP_CMND */
10601 wqe->generic.bde.tus.f.bdeFlags =
10602 BUFF_TYPE_BDE_IMMED;
10603 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10604 wqe->generic.bde.addrHigh = 0;
10605 wqe->generic.bde.addrLow = 88; /* Word 22 */
10606
10607 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10608 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10609
10610 /* Word 22-29 FCP CMND Payload */
10611 ptr = &wqe->words[22];
10612 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10613 } else {
10614 /* Word 0-2 - Inline BDE */
10615 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10616 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10617 wqe->generic.bde.addrHigh = sgl->addr_hi;
10618 wqe->generic.bde.addrLow = sgl->addr_lo;
10619
10620 /* Word 10 */
10621 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10622 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10623 }
10624
10625 /* add the VMID tags as per switch response */
10626 if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10627 if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10628 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10629 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10630 (piocb->vmid_tag.cs_ctl_vmid));
10631 } else if (phba->cfg_vmid_app_header) {
10632 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10633 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10634 wqe->words[31] = piocb->vmid_tag.app_id;
10635 }
10636 }
10637 }
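
/*
 * Note on the two WQE layouts built above: with fcp_embed_io the FCP_CMND
 * payload is copied inline into WQE words 22-29 (BUFF_TYPE_BDE_IMMED with
 * wqe_wqes set), sparing the port a separate DMA fetch of the command;
 * otherwise words 0-2 carry a 64-bit BDE (wqe_dbde set) that points at the
 * external FCP_CMND buffer described by the first SGE.
 */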
10638
10639 /**
10640 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10641 * @phba: Pointer to HBA context object.
10642 * @ring_number: SLI ring number to issue iocb on.
10643 * @piocb: Pointer to command iocb.
10644 * @flag: Flag indicating if this command can be put into txq.
10645 *
10646 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10647 * an iocb command to an HBA with SLI-4 interface spec.
10648 *
10649 * This function is called with ringlock held. The function will return success
10650 * after it successfully submits the iocb to firmware or after adding it to
10651 * the txq.
10652 **/
10653 static int
10654 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10655 struct lpfc_iocbq *piocb, uint32_t flag)
10656 {
10657 struct lpfc_sglq *sglq;
10658 union lpfc_wqe128 *wqe;
10659 struct lpfc_queue *wq;
10660 struct lpfc_sli_ring *pring;
10661 u32 ulp_command = get_job_cmnd(phba, piocb);
10662
10663 /* Get the WQ */
10664 if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10665 (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10666 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10667 } else {
10668 wq = phba->sli4_hba.els_wq;
10669 }
10670
10671 /* Get corresponding ring */
10672 pring = wq->pring;
10673
10674 /*
10675 * The WQE can be either 64 or 128 bytes.
10676 */
10677
10678 lockdep_assert_held(&pring->ring_lock);
10679 wqe = &piocb->wqe;
10680 if (piocb->sli4_xritag == NO_XRI) {
10681 if (ulp_command == CMD_ABORT_XRI_CX)
10682 sglq = NULL;
10683 else {
10684 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10685 if (!sglq) {
10686 if (!(flag & SLI_IOCB_RET_IOCB)) {
10687 __lpfc_sli_ringtx_put(phba,
10688 pring,
10689 piocb);
10690 return IOCB_SUCCESS;
10691 } else {
10692 return IOCB_BUSY;
10693 }
10694 }
10695 }
10696 } else if (piocb->cmd_flag & LPFC_IO_FCP) {
10697 /* These IO's already have an XRI and a mapped sgl. */
10698 sglq = NULL;
10699 } else {
10701 /*
10702 * This is a continuation of a command (CX), so this
10703 * sglq is on the active list.
10704 */
10705 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10706 if (!sglq)
10707 return IOCB_ERROR;
10708 }
10709
10710 if (sglq) {
10711 piocb->sli4_lxritag = sglq->sli4_lxritag;
10712 piocb->sli4_xritag = sglq->sli4_xritag;
10713
10714 /* For an ABTS sent by the initiator to a CT exchange, the
10715 * RX_ID field will be filled with the newly
10716 * allocated responder XRI.
10717 */
10718 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10719 piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10720 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10721 piocb->sli4_xritag);
10722
10723 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10724 piocb->sli4_xritag);
10725
10726 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10727 return IOCB_ERROR;
10728 }
10729
10730 if (lpfc_sli4_wq_put(wq, wqe))
10731 return IOCB_ERROR;
10732
10733 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10734
10735 return 0;
10736 }
10737
10738 /*
10739 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10740 *
10741 * This routine wraps the actual FCP I/O function pointer from the
10742 * lpfc_hba struct, issuing a WQE on SLI-4 or an IOCB on SLI-3.
10743 *
10744 *
10745 * Return codes:
10746 * IOCB_ERROR - Error
10747 * IOCB_SUCCESS - Success
10748 * IOCB_BUSY - Busy
10749 **/
10750 int
10751 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10752 struct lpfc_iocbq *piocb, uint32_t flag)
10753 {
10754 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10755 }
10756
10757 /*
10758 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10759 *
10760 * This routine wraps the actual lockless IOCB-issuing function
10761 * pointer from the lpfc_hba struct.
10762 *
10763 * Return codes:
10764 * IOCB_ERROR - Error
10765 * IOCB_SUCCESS - Success
10766 * IOCB_BUSY - Busy
10767 **/
10768 int
10769 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10770 struct lpfc_iocbq *piocb, uint32_t flag)
10771 {
10772 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10773 }
10774
10775 static void
10776 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10777 struct lpfc_vport *vport,
10778 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10779 u32 elscmd, u8 tmo, u8 expect_rsp)
10780 {
10781 struct lpfc_hba *phba = vport->phba;
10782 IOCB_t *cmd;
10783
10784 cmd = &cmdiocbq->iocb;
10785 memset(cmd, 0, sizeof(*cmd));
10786
10787 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10788 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10789 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10790
10791 if (expect_rsp) {
10792 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10793 cmd->un.elsreq64.remoteID = did; /* DID */
10794 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10795 cmd->ulpTimeout = tmo;
10796 } else {
10797 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10798 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10799 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10800 cmd->ulpPU = PARM_NPIV_DID;
10801 }
10802 cmd->ulpBdeCount = 1;
10803 cmd->ulpLe = 1;
10804 cmd->ulpClass = CLASS3;
10805
10806 /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10807 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10808 if (expect_rsp) {
10809 cmd->un.elsreq64.myID = vport->fc_myDID;
10810
10811 /* For ELS_REQUEST64_CR, use the VPI by default */
10812 cmd->ulpContext = phba->vpi_ids[vport->vpi];
10813 }
10814
10815 cmd->ulpCt_h = 0;
10816 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10817 if (elscmd == ELS_CMD_ECHO)
10818 cmd->ulpCt_l = 0; /* context = invalid RPI */
10819 else
10820 cmd->ulpCt_l = 1; /* context = VPI */
10821 }
10822 }
10823
10824 static void
10825 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10826 struct lpfc_vport *vport,
10827 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10828 u32 elscmd, u8 tmo, u8 expect_rsp)
10829 {
10830 struct lpfc_hba *phba = vport->phba;
10831 union lpfc_wqe128 *wqe;
10832 struct ulp_bde64_le *bde;
10833 u8 els_id;
10834
10835 wqe = &cmdiocbq->wqe;
10836 memset(wqe, 0, sizeof(*wqe));
10837
10838 /* Word 0 - 2 BDE */
10839 bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10840 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10841 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10842 bde->type_size = cpu_to_le32(cmd_size);
10843 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10844
10845 if (expect_rsp) {
10846 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10847
10848 /* Transfer length */
10849 wqe->els_req.payload_len = cmd_size;
10850 wqe->els_req.max_response_payload_len = FCELSSIZE;
10851
10852 /* DID */
10853 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10854
10855 /* Word 11 - ELS_ID */
10856 switch (elscmd) {
10857 case ELS_CMD_PLOGI:
10858 els_id = LPFC_ELS_ID_PLOGI;
10859 break;
10860 case ELS_CMD_FLOGI:
10861 els_id = LPFC_ELS_ID_FLOGI;
10862 break;
10863 case ELS_CMD_LOGO:
10864 els_id = LPFC_ELS_ID_LOGO;
10865 break;
10866 case ELS_CMD_FDISC:
10867 if (!vport->fc_myDID) {
10868 els_id = LPFC_ELS_ID_FDISC;
10869 break;
10870 }
10871 fallthrough;
10872 default:
10873 els_id = LPFC_ELS_ID_DEFAULT;
10874 break;
10875 }
10876
10877 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10878 } else {
10879 /* DID */
10880 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10881
10882 /* Transfer length */
10883 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10884
10885 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10886 CMD_XMIT_ELS_RSP64_WQE);
10887 }
10888
10889 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10890 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10891 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10892
10893 /* If we have NPIV enabled, we want to send ELS traffic by VPI.
10894 * For SLI4, since the driver controls VPIs we also want to include
10895 * all ELS pt2pt protocol traffic as well.
10896 */
10897 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10898 (vport->fc_flag & FC_PT2PT)) {
10899 if (expect_rsp) {
10900 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10901
10902 /* For ELS_REQUEST64_WQE, use the VPI by default */
10903 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10904 phba->vpi_ids[vport->vpi]);
10905 }
10906
10907 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10908 if (elscmd == ELS_CMD_ECHO)
10909 bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10910 else
10911 bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10912 }
10913 }
10914
10915 void
10916 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10917 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10918 u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10919 u8 expect_rsp)
10920 {
10921 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10922 elscmd, tmo, expect_rsp);
10923 }
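
/*
 * Illustrative sketch only; elsiocb, bmp, cmd_size, did, and tmo are
 * assumed to come from an ELS caller such as lpfc_prep_els_iocb(). The
 * usual flow pairs the prep routine above with lpfc_sli_issue_iocb() on
 * the ELS ring, e.g. for a PLOGI expecting a response:
 *
 *	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size,
 *				  did, ELS_CMD_PLOGI, tmo, 1);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 */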
10924
10925 static void
10926 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10927 u16 rpi, u32 num_entry, u8 tmo)
10928 {
10929 IOCB_t *cmd;
10930
10931 cmd = &cmdiocbq->iocb;
10932 memset(cmd, 0, sizeof(*cmd));
10933
10934 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10935 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10936 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10937 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10938
10939 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10940 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10941 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10942
10943 cmd->ulpContext = rpi;
10944 cmd->ulpClass = CLASS3;
10945 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10946 cmd->ulpBdeCount = 1;
10947 cmd->ulpLe = 1;
10948 cmd->ulpOwner = OWN_CHIP;
10949 cmd->ulpTimeout = tmo;
10950 }
10951
10952 static void
10953 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10954 u16 rpi, u32 num_entry, u8 tmo)
10955 {
10956 union lpfc_wqe128 *cmdwqe;
10957 struct ulp_bde64_le *bde, *bpl;
10958 u32 xmit_len = 0, total_len = 0, size, type, i;
10959
10960 cmdwqe = &cmdiocbq->wqe;
10961 memset(cmdwqe, 0, sizeof(*cmdwqe));
10962
10963 /* Calculate total_len and xmit_len */
10964 bpl = (struct ulp_bde64_le *)bmp->virt;
10965 for (i = 0; i < num_entry; i++) {
10966 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10967 total_len += size;
10968 }
10969 for (i = 0; i < num_entry; i++) {
10970 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10971 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10972 if (type != ULP_BDE64_TYPE_BDE_64)
10973 break;
10974 xmit_len += size;
10975 }
10976
10977 /* Words 0 - 2 */
10978 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10979 bde->addr_low = bpl->addr_low;
10980 bde->addr_high = bpl->addr_high;
10981 bde->type_size = cpu_to_le32(xmit_len);
10982 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10983
10984 /* Word 3 */
10985 cmdwqe->gen_req.request_payload_len = xmit_len;
10986
10987 /* Word 5 */
10988 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10989 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10990 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10991 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10992
10993 /* Word 6 */
10994 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10995
10996 /* Word 7 */
10997 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10998 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10999 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
11000 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
11001
11002 /* Word 12 */
11003 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
11004 }
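
/*
 * Note on the split above: xmit_len sums only the leading BDE_64 entries
 * of the BPL (the request portion), while total_len covers every entry;
 * the difference is programmed into Word 12 as the maximum response
 * payload the GEN_REQUEST may return.
 */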
11005
11006 void
11007 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11008 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
11009 {
11010 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
11011 }
11012
11013 static void
11014 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
11015 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11016 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11017 {
11018 IOCB_t *icmd;
11019
11020 icmd = &cmdiocbq->iocb;
11021 memset(icmd, 0, sizeof(*icmd));
11022
11023 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
11024 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
11025 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
11026 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
11027 icmd->un.xseq64.w5.hcsw.Fctl = LA;
11028 if (last_seq)
11029 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
11030 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11031 icmd->un.xseq64.w5.hcsw.Rctl = rctl;
11032 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
11033
11034 icmd->ulpBdeCount = 1;
11035 icmd->ulpLe = 1;
11036 icmd->ulpClass = CLASS3;
11037
11038 switch (cr_cx_cmd) {
11039 case CMD_XMIT_SEQUENCE64_CR:
11040 icmd->ulpContext = rpi;
11041 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
11042 break;
11043 case CMD_XMIT_SEQUENCE64_CX:
11044 icmd->ulpContext = ox_id;
11045 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
11046 break;
11047 default:
11048 break;
11049 }
11050 }
11051
11052 static void
11053 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
11054 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11055 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11056 {
11057 union lpfc_wqe128 *wqe;
11058 struct ulp_bde64 *bpl;
11059
11060 wqe = &cmdiocbq->wqe;
11061 memset(wqe, 0, sizeof(*wqe));
11062
11063 /* Words 0 - 2 */
11064 bpl = (struct ulp_bde64 *)bmp->virt;
11065 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
11066 wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
11067 wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
11068
11069 /* Word 5 */
11070 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
11071 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
11072 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
11073 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
11074 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
11075
11076 /* Word 6 */
11077 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
11078
11079 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
11080 CMD_XMIT_SEQUENCE64_WQE);
11081
11082 /* Word 7 */
11083 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
11084
11085 /* Word 9 */
11086 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
11087
11088 /* Word 12 */
11089 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
11090 wqe->xmit_sequence.xmit_len = full_size;
11091 else
11092 wqe->xmit_sequence.xmit_len =
11093 wqe->xmit_sequence.bde.tus.f.bdeSize;
11094 }
11095
11096 void
11097 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11098 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11099 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11100 {
11101 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
11102 rctl, last_seq, cr_cx_cmd);
11103 }
11104
11105 static void
11106 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11107 u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11108 bool wqec)
11109 {
11110 IOCB_t *icmd = NULL;
11111
11112 icmd = &cmdiocbq->iocb;
11113 memset(icmd, 0, sizeof(*icmd));
11114
11115 /* Word 5 */
11116 icmd->un.acxri.abortContextTag = ulp_context;
11117 icmd->un.acxri.abortIoTag = iotag;
11118
11119 if (ia) {
11120 /* Word 7 */
11121 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
11122 } else {
11123 /* Word 3 */
11124 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
11125
11126 /* Word 7 */
11127 icmd->ulpClass = ulp_class;
11128 icmd->ulpCommand = CMD_ABORT_XRI_CN;
11129 }
11130
11131 /* Word 7 */
11132 icmd->ulpLe = 1;
11133 }
11134
11135 static void
11136 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11137 u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11138 bool wqec)
11139 {
11140 union lpfc_wqe128 *wqe;
11141
11142 wqe = &cmdiocbq->wqe;
11143 memset(wqe, 0, sizeof(*wqe));
11144
11145 /* Word 3 */
11146 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
11147 if (ia)
11148 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
11149 else
11150 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
11151
11152 /* Word 7 */
11153 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
11154
11155 /* Word 8 */
11156 wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
11157
11158 /* Word 9 */
11159 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
11160
11161 /* Word 10 */
11162 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
11163
11164 /* Word 11 */
11165 if (wqec)
11166 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
11167 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
11168 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11169 }
11170
11171 void
11172 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11173 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
11174 bool ia, bool wqec)
11175 {
11176 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
11177 cqid, ia, wqec);
11178 }
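
/*
 * Note on @ia above: when true, the SLI-3 path issues CLOSE_XRI and the
 * SLI-4 path sets the inhibit-ABTS bit, so the exchange is torn down
 * without putting an ABTS on the wire; when false, a full ABORT_XRI
 * (ABTS) abort is requested.
 */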
11179
11180 /**
11181 * lpfc_sli_api_table_setup - Set up sli api function jump table
11182 * @phba: The hba struct for which this call is being executed.
11183 * @dev_grp: The HBA PCI-Device group number.
11184 *
11185 * This routine sets up the SLI interface API function jump table in @phba
11186 * struct.
11187 * Returns: 0 - success, -ENODEV - failure.
11188 **/
11189 int
11190 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11191 {
11192
11193 switch (dev_grp) {
11194 case LPFC_PCI_DEV_LP:
11195 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11196 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11197 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11198 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11199 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11200 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11201 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11202 break;
11203 case LPFC_PCI_DEV_OC:
11204 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11205 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11206 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11207 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11208 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11209 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11210 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11211 break;
11212 default:
11213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11214 "1419 Invalid HBA PCI-device group: 0x%x\n",
11215 dev_grp);
11216 return -ENODEV;
11217 }
11218 return 0;
11219 }
11220
11221 /**
11222 * lpfc_sli4_calc_ring - Calculates which ring to use
11223 * @phba: Pointer to HBA context object.
11224 * @piocb: Pointer to command iocb.
11225 *
11226 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11227 * hba_wqidx, thus we need to calculate the corresponding ring.
11228 * Since ABORTS must go on the same WQ as the command they are
11229 * aborting, we use the command's hba_wqidx.
11230 */
11231 struct lpfc_sli_ring *
11232 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11233 {
11234 struct lpfc_io_buf *lpfc_cmd;
11235
11236 if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11237 if (unlikely(!phba->sli4_hba.hdwq))
11238 return NULL;
11239 /*
11240 * For an abort iocb, hba_wqidx should already
11241 * be set up based on which work queue we used.
11242 */
11243 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11244 lpfc_cmd = piocb->io_buf;
11245 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11246 }
11247 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11248 } else {
11249 if (unlikely(!phba->sli4_hba.els_wq))
11250 return NULL;
11251 piocb->hba_wqidx = 0;
11252 return phba->sli4_hba.els_wq->pring;
11253 }
11254 }
11255
11256 /**
11257 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11258 * @phba: Pointer to HBA context object.
11259 * @ring_number: Ring number
11260 * @piocb: Pointer to command iocb.
11261 * @flag: Flag indicating if this command can be put into txq.
11262 *
11263 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
11264 * function. This function gets the hbalock and calls
11265 * __lpfc_sli_issue_iocb function and will return the error returned
11266 * by __lpfc_sli_issue_iocb function. This wrapper is used by
11267 * functions which do not hold hbalock.
11268 **/
11269 int
11270 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11271 struct lpfc_iocbq *piocb, uint32_t flag)
11272 {
11273 struct lpfc_sli_ring *pring;
11274 struct lpfc_queue *eq;
11275 unsigned long iflags;
11276 int rc;
11277
11278 /* If the PCI channel is in offline state, do not post iocbs. */
11279 if (unlikely(pci_channel_offline(phba->pcidev)))
11280 return IOCB_ERROR;
11281
11282 if (phba->sli_rev == LPFC_SLI_REV4) {
11283 lpfc_sli_prep_wqe(phba, piocb);
11284
11285 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11286
11287 pring = lpfc_sli4_calc_ring(phba, piocb);
11288 if (unlikely(pring == NULL))
11289 return IOCB_ERROR;
11290
11291 spin_lock_irqsave(&pring->ring_lock, iflags);
11292 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11293 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11294
11295 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11296 } else {
11297 /* For now, SLI2/3 will still use hbalock */
11298 spin_lock_irqsave(&phba->hbalock, iflags);
11299 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11300 spin_unlock_irqrestore(&phba->hbalock, iflags);
11301 }
11302 return rc;
11303 }
11304
11305 /**
11306 * lpfc_extra_ring_setup - Extra ring setup function
11307 * @phba: Pointer to HBA context object.
11308 *
11309 * This function is called while the driver attaches to the
11310 * HBA to set up the extra ring. The extra ring is used
11311 * only when the driver needs to support target mode
11312 * or IP-over-FC functionality.
11313 *
11314 * This function is called with no lock held. SLI3 only.
11315 **/
11316 static int
11317 lpfc_extra_ring_setup(struct lpfc_hba *phba)
11318 {
11319 struct lpfc_sli *psli;
11320 struct lpfc_sli_ring *pring;
11321
11322 psli = &phba->sli;
11323
11324 /* Adjust cmd/rsp ring iocb entries more evenly */
11325
11326 /* Take some away from the FCP ring */
11327 pring = &psli->sli3_ring[LPFC_FCP_RING];
11328 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11329 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11330 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11331 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11332
11333 /* and give them to the extra ring */
11334 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11335
11336 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11337 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11338 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11339 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11340
11341 /* Setup default profile for this ring */
11342 pring->iotag_max = 4096;
11343 pring->num_mask = 1;
11344 pring->prt[0].profile = 0; /* Mask 0 */
11345 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11346 pring->prt[0].type = phba->cfg_multi_ring_type;
11347 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11348 return 0;
11349 }
11350
11351 static void
11352 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11353 struct lpfc_nodelist *ndlp)
11354 {
11355 unsigned long iflags;
11356 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11357
11358 spin_lock_irqsave(&phba->hbalock, iflags);
11359 if (!list_empty(&evtp->evt_listp)) {
11360 spin_unlock_irqrestore(&phba->hbalock, iflags);
11361 return;
11362 }
11363
11364 /* Incrementing the reference count until the queued work is done. */
11365 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11366 if (!evtp->evt_arg1) {
11367 spin_unlock_irqrestore(&phba->hbalock, iflags);
11368 return;
11369 }
11370 evtp->evt = LPFC_EVT_RECOVER_PORT;
11371 list_add_tail(&evtp->evt_listp, &phba->work_list);
11372 spin_unlock_irqrestore(&phba->hbalock, iflags);
11373
11374 lpfc_worker_wake_up(phba);
11375 }
11376
11377 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11378 * @phba: Pointer to HBA context object.
11379 * @iocbq: Pointer to iocb object.
11380 *
11381 * The async_event handler calls this routine when it receives
11382 * an ASYNC_STATUS_CN event from the port. The port generates
11383 * this event when an Abort Sequence request to an rport fails
11384 * twice in succession. The abort could be originated by the
11385 * driver or by the port. The ABTS could have been for an ELS
11386 * or FCP IO. The port only generates this event when an ABTS
11387 * fails to complete after one retry.
11388 */
11389 static void
11390 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11391 struct lpfc_iocbq *iocbq)
11392 {
11393 struct lpfc_nodelist *ndlp = NULL;
11394 uint16_t rpi = 0, vpi = 0;
11395 struct lpfc_vport *vport = NULL;
11396
11397 /* The rpi in the ulpContext is vport-sensitive. */
11398 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11399 rpi = iocbq->iocb.ulpContext;
11400
11401 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11402 "3092 Port generated ABTS async event "
11403 "on vpi %d rpi %d status 0x%x\n",
11404 vpi, rpi, iocbq->iocb.ulpStatus);
11405
11406 vport = lpfc_find_vport_by_vpid(phba, vpi);
11407 if (!vport)
11408 goto err_exit;
11409 ndlp = lpfc_findnode_rpi(vport, rpi);
11410 if (!ndlp)
11411 goto err_exit;
11412
11413 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11414 lpfc_sli_abts_recover_port(vport, ndlp);
11415 return;
11416
11417 err_exit:
11418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11419 "3095 Event Context not found, no "
11420 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11421 vpi, rpi, iocbq->iocb.ulpStatus,
11422 iocbq->iocb.ulpContext);
11423 }
11424
11425 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11426 * @phba: pointer to HBA context object.
11427 * @ndlp: nodelist pointer for the impacted rport.
11428 * @axri: pointer to the wcqe containing the failed exchange.
11429 *
11430 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11431 * port. The port generates this event when an abort exchange request to an
11432 * rport fails twice in succession with no reply. The abort could be originated
11433 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
11434 */
11435 void
11436 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11437 struct lpfc_nodelist *ndlp,
11438 struct sli4_wcqe_xri_aborted *axri)
11439 {
11440 uint32_t ext_status = 0;
11441
11442 if (!ndlp) {
11443 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11444 "3115 Node Context not found, driver "
11445 "ignoring abts err event\n");
11446 return;
11447 }
11448
11449 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11450 "3116 Port generated FCP XRI ABORT event on "
11451 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11452 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11453 bf_get(lpfc_wcqe_xa_xri, axri),
11454 bf_get(lpfc_wcqe_xa_status, axri),
11455 axri->parameter);
11456
11457 /*
11458 * Catch the ABTS protocol failure case. Older OCe FW releases returned
11459 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11460 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11461 */
11462 ext_status = axri->parameter & IOERR_PARAM_MASK;
11463 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11464 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11465 lpfc_sli_post_recovery_event(phba, ndlp);
11466 }
11467
11468 /**
11469 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11470 * @phba: Pointer to HBA context object.
11471 * @pring: Pointer to driver SLI ring object.
11472 * @iocbq: Pointer to iocb object.
11473 *
11474 * This function is called by the slow ring event handler
11475 * function when there is an ASYNC event iocb in the ring.
11476 * This function is called with no lock held.
11477 * Currently this function handles only temperature related
11478 * ASYNC events. The function decodes the temperature sensor
11479 * event message and posts events for the management applications.
11480 **/
11481 static void
11482 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11483 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11484 {
11485 IOCB_t *icmd;
11486 uint16_t evt_code;
11487 struct temp_event temp_event_data;
11488 struct Scsi_Host *shost;
11489 uint32_t *iocb_w;
11490
11491 icmd = &iocbq->iocb;
11492 evt_code = icmd->un.asyncstat.evt_code;
11493
11494 switch (evt_code) {
11495 case ASYNC_TEMP_WARN:
11496 case ASYNC_TEMP_SAFE:
11497 temp_event_data.data = (uint32_t) icmd->ulpContext;
11498 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11499 if (evt_code == ASYNC_TEMP_WARN) {
11500 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11502 "0347 Adapter is very hot, please take "
11503 "corrective action. temperature : %d Celsius\n",
11504 (uint32_t) icmd->ulpContext);
11505 } else {
11506 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11508 "0340 Adapter temperature is OK now. "
11509 "temperature : %d Celsius\n",
11510 (uint32_t) icmd->ulpContext);
11511 }
11512
11513 /* Send temperature change event to applications */
11514 shost = lpfc_shost_from_vport(phba->pport);
11515 fc_host_post_vendor_event(shost, fc_get_event_number(),
11516 sizeof(temp_event_data), (char *) &temp_event_data,
11517 LPFC_NL_VENDOR_ID);
11518 break;
11519 case ASYNC_STATUS_CN:
11520 lpfc_sli_abts_err_handler(phba, iocbq);
11521 break;
11522 default:
11523 iocb_w = (uint32_t *) icmd;
11524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11525 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11526 " evt_code 0x%x\n"
11527 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11528 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11529 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11530 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11531 pring->ringno, icmd->un.asyncstat.evt_code,
11532 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11533 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11534 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11535 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11536
11537 break;
11538 }
11539 }
11540
11541
11542 /**
11543 * lpfc_sli4_setup - SLI ring setup function
11544 * @phba: Pointer to HBA context object.
11545 *
11546 * lpfc_sli4_setup sets up the ELS ring of the SLI-4 interface
11547 * with its unsolicited-event receive masks. This function is
11548 * called while the driver attaches to the HBA and before the
11549 * interrupts are enabled, so there is no need for locking.
11550 *
11551 * This function always returns 0.
11552 **/
11553 int
11554 lpfc_sli4_setup(struct lpfc_hba *phba)
11555 {
11556 struct lpfc_sli_ring *pring;
11557
11558 pring = phba->sli4_hba.els_wq->pring;
11559 pring->num_mask = LPFC_MAX_RING_MASK;
11560 pring->prt[0].profile = 0; /* Mask 0 */
11561 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11562 pring->prt[0].type = FC_TYPE_ELS;
11563 pring->prt[0].lpfc_sli_rcv_unsol_event =
11564 lpfc_els_unsol_event;
11565 pring->prt[1].profile = 0; /* Mask 1 */
11566 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11567 pring->prt[1].type = FC_TYPE_ELS;
11568 pring->prt[1].lpfc_sli_rcv_unsol_event =
11569 lpfc_els_unsol_event;
11570 pring->prt[2].profile = 0; /* Mask 2 */
11571 /* NameServer Inquiry */
11572 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11573 /* NameServer */
11574 pring->prt[2].type = FC_TYPE_CT;
11575 pring->prt[2].lpfc_sli_rcv_unsol_event =
11576 lpfc_ct_unsol_event;
11577 pring->prt[3].profile = 0; /* Mask 3 */
11578 /* NameServer response */
11579 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11580 /* NameServer */
11581 pring->prt[3].type = FC_TYPE_CT;
11582 pring->prt[3].lpfc_sli_rcv_unsol_event =
11583 lpfc_ct_unsol_event;
11584 return 0;
11585 }
11586
11587 /**
11588 * lpfc_sli_setup - SLI ring setup function
11589 * @phba: Pointer to HBA context object.
11590 *
11591 * lpfc_sli_setup sets up the rings of the SLI interface with the
11592 * number of iocbs per ring and the iotag ranges. This function is
11593 * called while the driver attaches to the HBA and before the
11594 * interrupts are enabled, so there is no need for locking.
11595 *
11596 * This function always returns 0. SLI3 only.
11597 **/
11598 int
11599 lpfc_sli_setup(struct lpfc_hba *phba)
11600 {
11601 int i, totiocbsize = 0;
11602 struct lpfc_sli *psli = &phba->sli;
11603 struct lpfc_sli_ring *pring;
11604
11605 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11606 psli->sli_flag = 0;
11607
11608 psli->iocbq_lookup = NULL;
11609 psli->iocbq_lookup_len = 0;
11610 psli->last_iotag = 0;
11611
11612 for (i = 0; i < psli->num_rings; i++) {
11613 pring = &psli->sli3_ring[i];
11614 switch (i) {
11615 case LPFC_FCP_RING: /* ring 0 - FCP */
11616 /* numCiocb and numRiocb are used in config_port */
11617 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11618 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11619 pring->sli.sli3.numCiocb +=
11620 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11621 pring->sli.sli3.numRiocb +=
11622 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11623 pring->sli.sli3.numCiocb +=
11624 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11625 pring->sli.sli3.numRiocb +=
11626 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11627 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11628 SLI3_IOCB_CMD_SIZE :
11629 SLI2_IOCB_CMD_SIZE;
11630 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11631 SLI3_IOCB_RSP_SIZE :
11632 SLI2_IOCB_RSP_SIZE;
11633 pring->iotag_ctr = 0;
11634 pring->iotag_max =
11635 (phba->cfg_hba_queue_depth * 2);
11636 pring->fast_iotag = pring->iotag_max;
11637 pring->num_mask = 0;
11638 break;
11639 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
11640 /* numCiocb and numRiocb are used in config_port */
11641 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11642 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11643 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11644 SLI3_IOCB_CMD_SIZE :
11645 SLI2_IOCB_CMD_SIZE;
11646 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11647 SLI3_IOCB_RSP_SIZE :
11648 SLI2_IOCB_RSP_SIZE;
11649 pring->iotag_max = phba->cfg_hba_queue_depth;
11650 pring->num_mask = 0;
11651 break;
11652 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
11653 /* numCiocb and numRiocb are used in config_port */
11654 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11655 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11656 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11657 SLI3_IOCB_CMD_SIZE :
11658 SLI2_IOCB_CMD_SIZE;
11659 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11660 SLI3_IOCB_RSP_SIZE :
11661 SLI2_IOCB_RSP_SIZE;
11662 pring->fast_iotag = 0;
11663 pring->iotag_ctr = 0;
11664 pring->iotag_max = 4096;
11665 pring->lpfc_sli_rcv_async_status =
11666 lpfc_sli_async_event_handler;
11667 pring->num_mask = LPFC_MAX_RING_MASK;
11668 pring->prt[0].profile = 0; /* Mask 0 */
11669 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11670 pring->prt[0].type = FC_TYPE_ELS;
11671 pring->prt[0].lpfc_sli_rcv_unsol_event =
11672 lpfc_els_unsol_event;
11673 pring->prt[1].profile = 0; /* Mask 1 */
11674 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11675 pring->prt[1].type = FC_TYPE_ELS;
11676 pring->prt[1].lpfc_sli_rcv_unsol_event =
11677 lpfc_els_unsol_event;
11678 pring->prt[2].profile = 0; /* Mask 2 */
11679 /* NameServer Inquiry */
11680 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11681 /* NameServer */
11682 pring->prt[2].type = FC_TYPE_CT;
11683 pring->prt[2].lpfc_sli_rcv_unsol_event =
11684 lpfc_ct_unsol_event;
11685 pring->prt[3].profile = 0; /* Mask 3 */
11686 /* NameServer response */
11687 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11688 /* NameServer */
11689 pring->prt[3].type = FC_TYPE_CT;
11690 pring->prt[3].lpfc_sli_rcv_unsol_event =
11691 lpfc_ct_unsol_event;
11692 break;
11693 }
11694 totiocbsize += (pring->sli.sli3.numCiocb *
11695 pring->sli.sli3.sizeCiocb) +
11696 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11697 }
11698 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11699 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11700 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11701 "SLI2 SLIM Data: x%x x%lx\n",
11702 phba->brd_no, totiocbsize,
11703 (unsigned long) MAX_SLIM_IOCB_SIZE);
11704 }
11705 if (phba->cfg_multi_ring_support == 2)
11706 lpfc_extra_ring_setup(phba);
11707
11708 return 0;
11709 }
11710
11711 /**
11712 * lpfc_sli4_queue_init - Queue initialization function
11713 * @phba: Pointer to HBA context object.
11714 *
11715 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11716 * ring. This function also initializes ring indices of each ring.
11717 * This function is called during the initialization of the SLI
11718 * interface of an HBA.
11719  * This function is called with no lock held and does not
11720  * return a value.
11721 **/
11722 void
11723 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11724 {
11725 struct lpfc_sli *psli;
11726 struct lpfc_sli_ring *pring;
11727 int i;
11728
11729 psli = &phba->sli;
11730 spin_lock_irq(&phba->hbalock);
11731 INIT_LIST_HEAD(&psli->mboxq);
11732 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11733 /* Initialize list headers for txq and txcmplq as double linked lists */
11734 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11735 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11736 pring->flag = 0;
11737 pring->ringno = LPFC_FCP_RING;
11738 pring->txcmplq_cnt = 0;
11739 INIT_LIST_HEAD(&pring->txq);
11740 INIT_LIST_HEAD(&pring->txcmplq);
11741 INIT_LIST_HEAD(&pring->iocb_continueq);
11742 spin_lock_init(&pring->ring_lock);
11743 }
11744 pring = phba->sli4_hba.els_wq->pring;
11745 pring->flag = 0;
11746 pring->ringno = LPFC_ELS_RING;
11747 pring->txcmplq_cnt = 0;
11748 INIT_LIST_HEAD(&pring->txq);
11749 INIT_LIST_HEAD(&pring->txcmplq);
11750 INIT_LIST_HEAD(&pring->iocb_continueq);
11751 spin_lock_init(&pring->ring_lock);
11752
11753 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11754 pring = phba->sli4_hba.nvmels_wq->pring;
11755 pring->flag = 0;
11756 pring->ringno = LPFC_ELS_RING;
11757 pring->txcmplq_cnt = 0;
11758 INIT_LIST_HEAD(&pring->txq);
11759 INIT_LIST_HEAD(&pring->txcmplq);
11760 INIT_LIST_HEAD(&pring->iocb_continueq);
11761 spin_lock_init(&pring->ring_lock);
11762 }
11763
11764 spin_unlock_irq(&phba->hbalock);
11765 }
11766
11767 /**
11768 * lpfc_sli_queue_init - Queue initialization function
11769 * @phba: Pointer to HBA context object.
11770 *
11771 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11772 * ring. This function also initializes ring indices of each ring.
11773 * This function is called during the initialization of the SLI
11774 * interface of an HBA.
11775  * This function is called with no lock held and does not
11776  * return a value.
11777 **/
11778 void
11779 lpfc_sli_queue_init(struct lpfc_hba *phba)
11780 {
11781 struct lpfc_sli *psli;
11782 struct lpfc_sli_ring *pring;
11783 int i;
11784
11785 psli = &phba->sli;
11786 spin_lock_irq(&phba->hbalock);
11787 INIT_LIST_HEAD(&psli->mboxq);
11788 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11789 /* Initialize list headers for txq and txcmplq as double linked lists */
11790 for (i = 0; i < psli->num_rings; i++) {
11791 pring = &psli->sli3_ring[i];
11792 pring->ringno = i;
11793 pring->sli.sli3.next_cmdidx = 0;
11794 pring->sli.sli3.local_getidx = 0;
11795 pring->sli.sli3.cmdidx = 0;
11796 INIT_LIST_HEAD(&pring->iocb_continueq);
11797 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11798 INIT_LIST_HEAD(&pring->postbufq);
11799 pring->flag = 0;
11800 INIT_LIST_HEAD(&pring->txq);
11801 INIT_LIST_HEAD(&pring->txcmplq);
11802 spin_lock_init(&pring->ring_lock);
11803 }
11804 spin_unlock_irq(&phba->hbalock);
11805 }
11806
11807 /**
11808 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11809 * @phba: Pointer to HBA context object.
11810 *
11811 * This routine flushes the mailbox command subsystem. It will unconditionally
11812 * flush all the mailbox commands in the three possible stages in the mailbox
11813 * command sub-system: pending mailbox command queue; the outstanding mailbox
11814 * command; and completed mailbox command queue. It is caller's responsibility
11815 * to make sure that the driver is in the proper state to flush the mailbox
11816 * command sub-system. Namely, the posting of mailbox commands into the
11817 * pending mailbox command queue from the various clients must be stopped;
11818  * either the HBA is in a state in which it will never work on the outstanding
11819 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11820 * mailbox command has been completed.
11821 **/
11822 static void
11823 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11824 {
11825 LIST_HEAD(completions);
11826 struct lpfc_sli *psli = &phba->sli;
11827 LPFC_MBOXQ_t *pmb;
11828 unsigned long iflag;
11829
11830 /* Disable softirqs, including timers from obtaining phba->hbalock */
11831 local_bh_disable();
11832
11833 /* Flush all the mailbox commands in the mbox system */
11834 spin_lock_irqsave(&phba->hbalock, iflag);
11835
11836 /* The pending mailbox command queue */
11837 list_splice_init(&phba->sli.mboxq, &completions);
11838 /* The outstanding active mailbox command */
11839 if (psli->mbox_active) {
11840 list_add_tail(&psli->mbox_active->list, &completions);
11841 psli->mbox_active = NULL;
11842 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11843 }
11844 /* The completed mailbox command queue */
11845 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11846 spin_unlock_irqrestore(&phba->hbalock, iflag);
11847
11848 /* Enable softirqs again, done with phba->hbalock */
11849 local_bh_enable();
11850
11851 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11852 while (!list_empty(&completions)) {
11853 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11854 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11855 if (pmb->mbox_cmpl)
11856 pmb->mbox_cmpl(phba, pmb);
11857 }
11858 }
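
/*
 * Illustrative sketch (hypothetical handler, not taken from this file): a
 * mailbox completion routine can recognize a flushed command by the
 * MBX_NOT_FINISHED status set above and only release its resources:
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		... normal completion processing ...
 *	}
 */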
11859
11860 /**
11861 * lpfc_sli_host_down - Vport cleanup function
11862 * @vport: Pointer to virtual port object.
11863 *
11864 * lpfc_sli_host_down is called to clean up the resources
11865 * associated with a vport before destroying virtual
11866 * port data structures.
11867 * This function does following operations:
11868 * - Free discovery resources associated with this virtual
11869 * port.
11870 * - Free iocbs associated with this virtual port in
11871 * the txq.
11872 * - Send abort for all iocb commands associated with this
11873 * vport in txcmplq.
11874 *
11875 * This function is called with no lock held and always returns 1.
11876 **/
11877 int
11878 lpfc_sli_host_down(struct lpfc_vport *vport)
11879 {
11880 LIST_HEAD(completions);
11881 struct lpfc_hba *phba = vport->phba;
11882 struct lpfc_sli *psli = &phba->sli;
11883 struct lpfc_queue *qp = NULL;
11884 struct lpfc_sli_ring *pring;
11885 struct lpfc_iocbq *iocb, *next_iocb;
11886 int i;
11887 unsigned long flags = 0;
11888 uint16_t prev_pring_flag;
11889
11890 lpfc_cleanup_discovery_resources(vport);
11891
11892 spin_lock_irqsave(&phba->hbalock, flags);
11893
11894 /*
11895 * Error everything on the txq since these iocbs
11896 * have not been given to the FW yet.
11897 * Also issue ABTS for everything on the txcmplq
11898 */
11899 if (phba->sli_rev != LPFC_SLI_REV4) {
11900 for (i = 0; i < psli->num_rings; i++) {
11901 pring = &psli->sli3_ring[i];
11902 prev_pring_flag = pring->flag;
11903 /* Only slow rings */
11904 if (pring->ringno == LPFC_ELS_RING) {
11905 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11906 /* Set the lpfc data pending flag */
11907 set_bit(LPFC_DATA_READY, &phba->data_flags);
11908 }
11909 list_for_each_entry_safe(iocb, next_iocb,
11910 &pring->txq, list) {
11911 if (iocb->vport != vport)
11912 continue;
11913 list_move_tail(&iocb->list, &completions);
11914 }
11915 list_for_each_entry_safe(iocb, next_iocb,
11916 &pring->txcmplq, list) {
11917 if (iocb->vport != vport)
11918 continue;
11919 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11920 NULL);
11921 }
11922 pring->flag = prev_pring_flag;
11923 }
11924 } else {
11925 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11926 pring = qp->pring;
11927 if (!pring)
11928 continue;
11929 if (pring == phba->sli4_hba.els_wq->pring) {
11930 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11931 /* Set the lpfc data pending flag */
11932 set_bit(LPFC_DATA_READY, &phba->data_flags);
11933 }
11934 prev_pring_flag = pring->flag;
11935 spin_lock(&pring->ring_lock);
11936 list_for_each_entry_safe(iocb, next_iocb,
11937 &pring->txq, list) {
11938 if (iocb->vport != vport)
11939 continue;
11940 list_move_tail(&iocb->list, &completions);
11941 }
11942 spin_unlock(&pring->ring_lock);
11943 list_for_each_entry_safe(iocb, next_iocb,
11944 &pring->txcmplq, list) {
11945 if (iocb->vport != vport)
11946 continue;
11947 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11948 NULL);
11949 }
11950 pring->flag = prev_pring_flag;
11951 }
11952 }
11953 spin_unlock_irqrestore(&phba->hbalock, flags);
11954
11955 /* Make sure HBA is alive */
11956 lpfc_issue_hb_tmo(phba);
11957
11958 /* Cancel all the IOCBs from the completions list */
11959 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11960 IOERR_SLI_DOWN);
11961 return 1;
11962 }
11963
11964 /**
11965 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11966 * @phba: Pointer to HBA context object.
11967 *
11968 * This function cleans up all iocb, buffers, mailbox commands
11969 * while shutting down the HBA. This function is called with no
11970 * lock held and always returns 1.
11971 * This function does the following to cleanup driver resources:
11972 * - Free discovery resources for each virtual port
11973 * - Cleanup any pending fabric iocbs
11974 * - Iterate through the iocb txq and free each entry
11975 * in the list.
11976 * - Free up any buffer posted to the HBA
11977 * - Free mailbox commands in the mailbox queue.
11978 **/
11979 int
11980 lpfc_sli_hba_down(struct lpfc_hba *phba)
11981 {
11982 LIST_HEAD(completions);
11983 struct lpfc_sli *psli = &phba->sli;
11984 struct lpfc_queue *qp = NULL;
11985 struct lpfc_sli_ring *pring;
11986 struct lpfc_dmabuf *buf_ptr;
11987 unsigned long flags = 0;
11988 int i;
11989
11990 /* Shutdown the mailbox command sub-system */
11991 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11992
11993 lpfc_hba_down_prep(phba);
11994
11995 /* Disable softirqs, including timers from obtaining phba->hbalock */
11996 local_bh_disable();
11997
11998 lpfc_fabric_abort_hba(phba);
11999
12000 spin_lock_irqsave(&phba->hbalock, flags);
12001
12002 /*
12003 * Error everything on the txq since these iocbs
12004 * have not been given to the FW yet.
12005 */
12006 if (phba->sli_rev != LPFC_SLI_REV4) {
12007 for (i = 0; i < psli->num_rings; i++) {
12008 pring = &psli->sli3_ring[i];
12009 /* Only slow rings */
12010 if (pring->ringno == LPFC_ELS_RING) {
12011 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12012 /* Set the lpfc data pending flag */
12013 set_bit(LPFC_DATA_READY, &phba->data_flags);
12014 }
12015 list_splice_init(&pring->txq, &completions);
12016 }
12017 } else {
12018 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12019 pring = qp->pring;
12020 if (!pring)
12021 continue;
12022 spin_lock(&pring->ring_lock);
12023 list_splice_init(&pring->txq, &completions);
12024 spin_unlock(&pring->ring_lock);
12025 if (pring == phba->sli4_hba.els_wq->pring) {
12026 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12027 /* Set the lpfc data pending flag */
12028 set_bit(LPFC_DATA_READY, &phba->data_flags);
12029 }
12030 }
12031 }
12032 spin_unlock_irqrestore(&phba->hbalock, flags);
12033
12034 /* Cancel all the IOCBs from the completions list */
12035 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12036 IOERR_SLI_DOWN);
12037
12038 spin_lock_irqsave(&phba->hbalock, flags);
12039 list_splice_init(&phba->elsbuf, &completions);
12040 phba->elsbuf_cnt = 0;
12041 phba->elsbuf_prev_cnt = 0;
12042 spin_unlock_irqrestore(&phba->hbalock, flags);
12043
12044 while (!list_empty(&completions)) {
12045 list_remove_head(&completions, buf_ptr,
12046 struct lpfc_dmabuf, list);
12047 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12048 kfree(buf_ptr);
12049 }
12050
12051 /* Enable softirqs again, done with phba->hbalock */
12052 local_bh_enable();
12053
12054 /* Return any active mbox cmds */
12055 del_timer_sync(&psli->mbox_tmo);
12056
12057 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12058 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12059 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12060
12061 return 1;
12062 }
12063
12064 /**
12065 * lpfc_sli_pcimem_bcopy - SLI memory copy function
12066 * @srcp: Source memory pointer.
12067 * @destp: Destination memory pointer.
12068  * @cnt: Number of bytes to be copied.
12069 *
12070 * This function is used for copying data between driver memory
12071 * and the SLI memory. This function also changes the endianness
12072 * of each word if native endianness is different from SLI
12073 * endianness. This function can be called with or without
12074 * lock.
12075 **/
12076 void
12077 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12078 {
12079 uint32_t *src = srcp;
12080 uint32_t *dest = destp;
12081 uint32_t ldata;
12082 int i;
12083
12084 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
12085 ldata = *src;
12086 ldata = le32_to_cpu(ldata);
12087 *dest = ldata;
12088 src++;
12089 dest++;
12090 }
12091 }
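
/*
 * Usage sketch (illustrative; "slim_vaddr" and "slim_copy" are hypothetical
 * buffers): copy a word-aligned region out of SLI memory, converting each
 * 32-bit word from little-endian to host byte order. Note that cnt is a
 * byte count and is walked in sizeof(uint32_t) steps:
 *
 *	uint32_t slim_copy[4];
 *
 *	lpfc_sli_pcimem_bcopy(slim_vaddr, slim_copy, sizeof(slim_copy));
 */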
12092
12093
12094 /**
12095 * lpfc_sli_bemem_bcopy - SLI memory copy function
12096 * @srcp: Source memory pointer.
12097 * @destp: Destination memory pointer.
12098  * @cnt: Number of bytes to be copied.
12099  *
12100  * This function is used for copying data from a data structure
12101  * with big endian representation to local endianness.
12102 * This function can be called with or without lock.
12103 **/
12104 void
12105 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12106 {
12107 uint32_t *src = srcp;
12108 uint32_t *dest = destp;
12109 uint32_t ldata;
12110 int i;
12111
12112 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12113 ldata = *src;
12114 ldata = be32_to_cpu(ldata);
12115 *dest = ldata;
12116 src++;
12117 dest++;
12118 }
12119 }
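
/*
 * Usage sketch (illustrative; "be_params" and "host_params" are hypothetical
 * buffers of equal size): convert a big-endian firmware structure to host
 * byte order one 32-bit word at a time; cnt is again a byte count:
 *
 *	lpfc_sli_bemem_bcopy(be_params, host_params, sizeof(host_params));
 */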
12120
12121 /**
12122 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12123 * @phba: Pointer to HBA context object.
12124 * @pring: Pointer to driver SLI ring object.
12125 * @mp: Pointer to driver buffer object.
12126 *
12127 * This function is called with no lock held.
12128  * It always returns zero after adding the buffer to the postbufq
12129 * buffer list.
12130 **/
12131 int
12132 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12133 struct lpfc_dmabuf *mp)
12134 {
12135 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
12136 later */
12137 spin_lock_irq(&phba->hbalock);
12138 list_add_tail(&mp->list, &pring->postbufq);
12139 pring->postbufq_cnt++;
12140 spin_unlock_irq(&phba->hbalock);
12141 return 0;
12142 }
12143
12144 /**
12145 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12146 * @phba: Pointer to HBA context object.
12147 *
12148 * When HBQ is enabled, buffers are searched based on tags. This function
12149 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
12150 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12151 * does not conflict with tags of buffer posted for unsolicited events.
12152 * The function returns the allocated tag. The function is called with
12153 * no locks held.
12154 **/
12155 uint32_t
12156 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12157 {
12158 spin_lock_irq(&phba->hbalock);
12159 phba->buffer_tag_count++;
12160 /*
12161 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
12162 	 * from a tag assigned by HBQ.
12163 */
12164 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12165 spin_unlock_irq(&phba->hbalock);
12166 return phba->buffer_tag_count;
12167 }
12168
12169 /**
12170 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12171 * @phba: Pointer to HBA context object.
12172 * @pring: Pointer to driver SLI ring object.
12173 * @tag: Buffer tag.
12174 *
12175 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12176 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
12177 * iocb is posted to the response ring with the tag of the buffer.
12178 * This function searches the pring->postbufq list using the tag
12179 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
12180 * iocb. If the buffer is found then lpfc_dmabuf object of the
12181 * buffer is returned to the caller else NULL is returned.
12182 * This function is called with no lock held.
12183 **/
12184 struct lpfc_dmabuf *
12185 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12186 uint32_t tag)
12187 {
12188 struct lpfc_dmabuf *mp, *next_mp;
12189 struct list_head *slp = &pring->postbufq;
12190
12191 /* Search postbufq, from the beginning, looking for a match on tag */
12192 spin_lock_irq(&phba->hbalock);
12193 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12194 if (mp->buffer_tag == tag) {
12195 list_del_init(&mp->list);
12196 pring->postbufq_cnt--;
12197 spin_unlock_irq(&phba->hbalock);
12198 return mp;
12199 }
12200 }
12201
12202 spin_unlock_irq(&phba->hbalock);
12203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12204 "0402 Cannot find virtual addr for buffer tag on "
12205 "ring %d Data x%lx x%px x%px x%x\n",
12206 pring->ringno, (unsigned long) tag,
12207 slp->next, slp->prev, pring->postbufq_cnt);
12208
12209 return NULL;
12210 }
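
/*
 * Illustrative round trip (a sketch; assumes "mp" is a DMA-mapped
 * lpfc_dmabuf and that the CMD_QUE_XRI64_CX iocb is built elsewhere):
 *
 *	uint32_t tag = lpfc_sli_get_buffer_tag(phba);
 *
 *	mp->buffer_tag = tag;
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... issue the CMD_QUE_XRI64_CX iocb carrying "tag" ...
 *	... later, on the CMD_IOCB_RET_XRI64_CX completion ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */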
12211
12212 /**
12213 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12214 * @phba: Pointer to HBA context object.
12215 * @pring: Pointer to driver SLI ring object.
12216 * @phys: DMA address of the buffer.
12217 *
12218 * This function searches the buffer list using the dma_address
12219 * of unsolicited event to find the driver's lpfc_dmabuf object
12220 * corresponding to the dma_address. The function returns the
12221 * lpfc_dmabuf object if a buffer is found else it returns NULL.
12222 * This function is called by the ct and els unsolicited event
12223 * handlers to get the buffer associated with the unsolicited
12224 * event.
12225 *
12226 * This function is called with no lock held.
12227 **/
12228 struct lpfc_dmabuf *
12229 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12230 dma_addr_t phys)
12231 {
12232 struct lpfc_dmabuf *mp, *next_mp;
12233 struct list_head *slp = &pring->postbufq;
12234
12235 /* Search postbufq, from the beginning, looking for a match on phys */
12236 spin_lock_irq(&phba->hbalock);
12237 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12238 if (mp->phys == phys) {
12239 list_del_init(&mp->list);
12240 pring->postbufq_cnt--;
12241 spin_unlock_irq(&phba->hbalock);
12242 return mp;
12243 }
12244 }
12245
12246 spin_unlock_irq(&phba->hbalock);
12247 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12248 "0410 Cannot find virtual addr for mapped buf on "
12249 "ring %d Data x%llx x%px x%px x%x\n",
12250 pring->ringno, (unsigned long long)phys,
12251 slp->next, slp->prev, pring->postbufq_cnt);
12252 return NULL;
12253 }
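
/*
 * Illustrative lookup from an unsolicited-event handler (a sketch; "icmd"
 * is the received IOCB and getPaddr() combines its high/low DMA words):
 *
 *	dma_addr_t paddr = getPaddr(icmd->un.cont64[0].addrHigh,
 *				    icmd->un.cont64[0].addrLow);
 *	struct lpfc_dmabuf *mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr);
 *
 *	if (!mp)
 *		return;	... buffer was never posted or already claimed ...
 */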
12254
12255 /**
12256 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12257 * @phba: Pointer to HBA context object.
12258 * @cmdiocb: Pointer to driver command iocb object.
12259 * @rspiocb: Pointer to driver response iocb object.
12260 *
12261 * This function is the completion handler for the abort iocbs for
12262 * ELS commands. This function is called from the ELS ring event
12263 * handler with no lock held. This function frees memory resources
12264 * associated with the abort iocb.
12265 **/
12266 static void
12267 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12268 struct lpfc_iocbq *rspiocb)
12269 {
12270 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12271 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12272 u8 cmnd = get_job_cmnd(phba, cmdiocb);
12273
12274 if (ulp_status) {
12275 /*
12276 * Assume that the port already completed and returned, or
12277 * will return the iocb. Just Log the message.
12278 		 * will return the iocb. Just log the message.
12279 if (phba->sli_rev < LPFC_SLI_REV4) {
12280 if (cmnd == CMD_ABORT_XRI_CX &&
12281 ulp_status == IOSTAT_LOCAL_REJECT &&
12282 ulp_word4 == IOERR_ABORT_REQUESTED) {
12283 goto release_iocb;
12284 }
12285 }
12286
12287 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12288 "0327 Cannot abort els iocb x%px "
12289 "with io cmd xri %x abort tag : x%x, "
12290 "abort status %x abort code %x\n",
12291 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12292 (phba->sli_rev == LPFC_SLI_REV4) ?
12293 get_wqe_reqtag(cmdiocb) :
12294 cmdiocb->iocb.un.acxri.abortContextTag,
12295 ulp_status, ulp_word4);
12296
12297 }
12298 release_iocb:
12299 lpfc_sli_release_iocbq(phba, cmdiocb);
12300 return;
12301 }
12302
12303 /**
12304 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12305 * @phba: Pointer to HBA context object.
12306 * @cmdiocb: Pointer to driver command iocb object.
12307 * @rspiocb: Pointer to driver response iocb object.
12308 *
12309 * The function is called from SLI ring event handler with no
12310 * lock held. This function is the completion handler for ELS commands
12311 * which are aborted. The function frees memory resources used for
12312 * the aborted ELS commands.
12313 **/
12314 void
12315 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12316 struct lpfc_iocbq *rspiocb)
12317 {
12318 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12319 IOCB_t *irsp;
12320 LPFC_MBOXQ_t *mbox;
12321 u32 ulp_command, ulp_status, ulp_word4, iotag;
12322
12323 ulp_command = get_job_cmnd(phba, cmdiocb);
12324 ulp_status = get_job_ulpstatus(phba, rspiocb);
12325 ulp_word4 = get_job_word4(phba, rspiocb);
12326
12327 if (phba->sli_rev == LPFC_SLI_REV4) {
12328 iotag = get_wqe_reqtag(cmdiocb);
12329 } else {
12330 irsp = &rspiocb->iocb;
12331 iotag = irsp->ulpIoTag;
12332
12333 /* It is possible a PLOGI_RJT for NPIV ports to get aborted.
12334 * The MBX_REG_LOGIN64 mbox command is freed back to the
12335 * mbox_mem_pool here.
12336 */
12337 if (cmdiocb->context_un.mbox) {
12338 mbox = cmdiocb->context_un.mbox;
12339 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12340 cmdiocb->context_un.mbox = NULL;
12341 }
12342 }
12343
12344 /* ELS cmd tag <ulpIoTag> completes */
12345 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12346 "0139 Ignoring ELS cmd code x%x completion Data: "
12347 "x%x x%x x%x x%px\n",
12348 ulp_command, ulp_status, ulp_word4, iotag,
12349 cmdiocb->ndlp);
12350 /*
12351 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12352 * if exchange is busy.
12353 */
12354 if (ulp_command == CMD_GEN_REQUEST64_CR)
12355 lpfc_ct_free_iocb(phba, cmdiocb);
12356 else
12357 lpfc_els_free_iocb(phba, cmdiocb);
12358
12359 lpfc_nlp_put(ndlp);
12360 }
12361
12362 /**
12363 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12364 * @phba: Pointer to HBA context object.
12365 * @pring: Pointer to driver SLI ring object.
12366 * @cmdiocb: Pointer to driver command iocb object.
12367 * @cmpl: completion function.
12368 *
12369 * This function issues an abort iocb for the provided command iocb. In case
12370 * of unloading, the abort iocb will not be issued to commands on the ELS
12371  * ring. Instead, the completion callback of those commands is changed so
12372  * that nothing happens when they finish. This function is called with
12373  * hbalock held and no ring_lock held (SLI4). The function returns
12374  * IOCB_ABORTING when the command iocb is itself an abort or is already being aborted.
12375 *
12376 **/
12377 int
12378 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12379 struct lpfc_iocbq *cmdiocb, void *cmpl)
12380 {
12381 struct lpfc_vport *vport = cmdiocb->vport;
12382 struct lpfc_iocbq *abtsiocbp;
12383 int retval = IOCB_ERROR;
12384 unsigned long iflags;
12385 struct lpfc_nodelist *ndlp = NULL;
12386 u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12387 u16 ulp_context, iotag;
12388 bool ia;
12389
12390 /*
12391 * There are certain command types we don't want to abort. And we
12392 * don't want to abort commands that are already in the process of
12393 * being aborted.
12394 */
12395 if (ulp_command == CMD_ABORT_XRI_WQE ||
12396 ulp_command == CMD_ABORT_XRI_CN ||
12397 ulp_command == CMD_CLOSE_XRI_CN ||
12398 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12399 return IOCB_ABORTING;
12400
12401 if (!pring) {
12402 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12403 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12404 else
12405 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12406 return retval;
12407 }
12408
12409 /*
12410 * If we're unloading, don't abort iocb on the ELS ring, but change
12411 * the callback so that nothing happens when it finishes.
12412 */
12413 if ((vport->load_flag & FC_UNLOADING) &&
12414 pring->ringno == LPFC_ELS_RING) {
12415 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12416 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12417 else
12418 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12419 return retval;
12420 }
12421
12422 /* issue ABTS for this IOCB based on iotag */
12423 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12424 if (abtsiocbp == NULL)
12425 return IOCB_NORESOURCE;
12426
12427 /* This signals the response to set the correct status
12428 * before calling the completion handler
12429 */
12430 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12431
12432 if (phba->sli_rev == LPFC_SLI_REV4) {
12433 ulp_context = cmdiocb->sli4_xritag;
12434 iotag = abtsiocbp->iotag;
12435 } else {
12436 iotag = cmdiocb->iocb.ulpIoTag;
12437 if (pring->ringno == LPFC_ELS_RING) {
12438 ndlp = cmdiocb->ndlp;
12439 ulp_context = ndlp->nlp_rpi;
12440 } else {
12441 ulp_context = cmdiocb->iocb.ulpContext;
12442 }
12443 }
12444
12445 if (phba->link_state < LPFC_LINK_UP ||
12446 (phba->sli_rev == LPFC_SLI_REV4 &&
12447 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12448 (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12449 ia = true;
12450 else
12451 ia = false;
12452
12453 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12454 cmdiocb->iocb.ulpClass,
12455 LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12456
12457 abtsiocbp->vport = vport;
12458
12459 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12460 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12461 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12462 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12463
12464 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12465 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12466
12467 if (cmpl)
12468 abtsiocbp->cmd_cmpl = cmpl;
12469 else
12470 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12471 abtsiocbp->vport = vport;
12472
12473 if (phba->sli_rev == LPFC_SLI_REV4) {
12474 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12475 if (unlikely(pring == NULL))
12476 goto abort_iotag_exit;
12477 /* Note: both hbalock and ring_lock need to be set here */
12478 spin_lock_irqsave(&pring->ring_lock, iflags);
12479 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12480 abtsiocbp, 0);
12481 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12482 } else {
12483 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12484 abtsiocbp, 0);
12485 }
12486
12487 abort_iotag_exit:
12488
12489 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12490 "0339 Abort IO XRI x%x, Original iotag x%x, "
12491 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12492 "retval x%x\n",
12493 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12494 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12495 retval);
12496 if (retval) {
12497 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12498 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12499 }
12500
12501 /*
12502 * Caller to this routine should check for IOCB_ERROR
12503 	 * and handle it properly. This routine no longer removes the
12504 	 * iocb from the txcmplq nor calls the completion in case of IOCB_ERROR.
12505 */
12506 return retval;
12507 }
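
/*
 * Illustrative caller pattern (a sketch of how the txcmplq walkers in this
 * file use the routine; "iocb" is an entry found on a txcmplq):
 *
 *	unsigned long iflags;
 *	int ret;
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (ret != IOCB_SUCCESS && ret != IOCB_ABORTING)
 *		... abort was not issued; caller decides how to recover ...
 */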
12508
12509 /**
12510 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12511 * @phba: pointer to lpfc HBA data structure.
12512 *
12513 * This routine will abort all pending and outstanding iocbs to an HBA.
12514 **/
12515 void
12516 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12517 {
12518 struct lpfc_sli *psli = &phba->sli;
12519 struct lpfc_sli_ring *pring;
12520 struct lpfc_queue *qp = NULL;
12521 int i;
12522
12523 if (phba->sli_rev != LPFC_SLI_REV4) {
12524 for (i = 0; i < psli->num_rings; i++) {
12525 pring = &psli->sli3_ring[i];
12526 lpfc_sli_abort_iocb_ring(phba, pring);
12527 }
12528 return;
12529 }
12530 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12531 pring = qp->pring;
12532 if (!pring)
12533 continue;
12534 lpfc_sli_abort_iocb_ring(phba, pring);
12535 }
12536 }
12537
12538 /**
12539 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12540 * @iocbq: Pointer to iocb object.
12541 * @vport: Pointer to driver virtual port object.
12542 *
12543 * This function acts as an iocb filter for functions which abort FCP iocbs.
12544 *
12545 * Return values
12546 * -ENODEV, if a null iocb or vport ptr is encountered
12547  * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12548  * already marked as aborted by the driver, or is an abort iocb itself
12549 * 0, passes criteria for aborting the FCP I/O iocb
12550 **/
12551 static int
12552 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12553 struct lpfc_vport *vport)
12554 {
12555 u8 ulp_command;
12556
12557 /* No null ptr vports */
12558 if (!iocbq || iocbq->vport != vport)
12559 return -ENODEV;
12560
12561 /* iocb must be for FCP IO, already exists on the TX cmpl queue,
12562 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12563 */
12564 ulp_command = get_job_cmnd(vport->phba, iocbq);
12565 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12566 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12567 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12568 (ulp_command == CMD_ABORT_XRI_CN ||
12569 ulp_command == CMD_CLOSE_XRI_CN ||
12570 ulp_command == CMD_ABORT_XRI_WQE))
12571 return -EINVAL;
12572
12573 return 0;
12574 }
12575
12576 /**
12577 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12578 * @iocbq: Pointer to driver iocb object.
12579 * @vport: Pointer to driver virtual port object.
12580 * @tgt_id: SCSI ID of the target.
12581 * @lun_id: LUN ID of the scsi device.
12582 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12583 *
12584 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12585 * host.
12586 *
12587 * It will return
12588 * 0 if the filtering criteria is met for the given iocb and will return
12589 * 1 if the filtering criteria is not met.
12590 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12591 * given iocb is for the SCSI device specified by vport, tgt_id and
12592 * lun_id parameter.
12593 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
12594 * given iocb is for the SCSI target specified by vport and tgt_id
12595 * parameters.
12596 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12597 * given iocb is for the SCSI host associated with the given vport.
12598 * This function is called with no locks held.
12599 **/
12600 static int
12601 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12602 uint16_t tgt_id, uint64_t lun_id,
12603 lpfc_ctx_cmd ctx_cmd)
12604 {
12605 struct lpfc_io_buf *lpfc_cmd;
12606 int rc = 1;
12607
12608 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12609
12610 if (lpfc_cmd->pCmd == NULL)
12611 return rc;
12612
12613 switch (ctx_cmd) {
12614 case LPFC_CTX_LUN:
12615 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12616 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12617 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12618 rc = 0;
12619 break;
12620 case LPFC_CTX_TGT:
12621 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12622 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12623 rc = 0;
12624 break;
12625 case LPFC_CTX_HOST:
12626 rc = 0;
12627 break;
12628 default:
12629 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12630 __func__, ctx_cmd);
12631 break;
12632 }
12633
12634 return rc;
12635 }
12636
12637 /**
12638 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12639 * @vport: Pointer to virtual port.
12640 * @tgt_id: SCSI ID of the target.
12641 * @lun_id: LUN ID of the scsi device.
12642 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12643 *
12644 * This function returns number of FCP commands pending for the vport.
12645 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12646 * commands pending on the vport associated with SCSI device specified
12647 * by tgt_id and lun_id parameters.
12648 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12649 * commands pending on the vport associated with SCSI target specified
12650 * by tgt_id parameter.
12651 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12652 * commands pending on the vport.
12653 * This function returns the number of iocbs which satisfy the filter.
12654 * This function is called without any lock held.
12655 **/
12656 int
12657 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12658 lpfc_ctx_cmd ctx_cmd)
12659 {
12660 struct lpfc_hba *phba = vport->phba;
12661 struct lpfc_iocbq *iocbq;
12662 int sum, i;
12663 unsigned long iflags;
12664 u8 ulp_command;
12665
12666 spin_lock_irqsave(&phba->hbalock, iflags);
12667 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12668 iocbq = phba->sli.iocbq_lookup[i];
12669
12670 if (!iocbq || iocbq->vport != vport)
12671 continue;
12672 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12673 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12674 continue;
12675
12676 /* Include counting outstanding aborts */
12677 ulp_command = get_job_cmnd(phba, iocbq);
12678 if (ulp_command == CMD_ABORT_XRI_CN ||
12679 ulp_command == CMD_CLOSE_XRI_CN ||
12680 ulp_command == CMD_ABORT_XRI_WQE) {
12681 sum++;
12682 continue;
12683 }
12684
12685 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12686 ctx_cmd) == 0)
12687 sum++;
12688 }
12689 spin_unlock_irqrestore(&phba->hbalock, iflags);
12690
12691 return sum;
12692 }
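
/*
 * Illustrative wait loop (a sketch modeled on error-handler usage;
 * "deadline" is a hypothetical jiffies bound): poll until all FCP I/Os
 * for a LUN have drained or the deadline passes:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, deadline))
 *		msleep(20);
 */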
12693
12694 /**
12695 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12696 * @phba: Pointer to HBA context object
12697 * @cmdiocb: Pointer to command iocb object.
12698 * @rspiocb: Pointer to response iocb object.
12699 *
12700 * This function is called when an aborted FCP iocb completes. This
12701 * function is called by the ring event handler with no lock held.
12702 * This function frees the iocb.
12703 **/
12704 void
12705 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12706 struct lpfc_iocbq *rspiocb)
12707 {
12708 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12709 "3096 ABORT_XRI_CX completing on rpi x%x "
12710 "original iotag x%x, abort cmd iotag x%x "
12711 "status 0x%x, reason 0x%x\n",
12712 (phba->sli_rev == LPFC_SLI_REV4) ?
12713 cmdiocb->sli4_xritag :
12714 cmdiocb->iocb.un.acxri.abortContextTag,
12715 get_job_abtsiotag(phba, cmdiocb),
12716 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12717 get_job_word4(phba, rspiocb));
12718 lpfc_sli_release_iocbq(phba, cmdiocb);
12719 return;
12720 }
12721
12722 /**
12723 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12724 * @vport: Pointer to virtual port.
12725 * @tgt_id: SCSI ID of the target.
12726 * @lun_id: LUN ID of the scsi device.
12727 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12728 *
12729 * This function sends an abort command for every SCSI command
12730 * associated with the given virtual port pending on the ring
12731 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12732 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12733 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12734 * followed by lpfc_sli_validate_fcp_iocb.
12735 *
12736 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12737 * FCP iocbs associated with lun specified by tgt_id and lun_id
12738 * parameters
12739 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12740 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12741 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12742 * FCP iocbs associated with virtual port.
12743 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12744 * lpfc_sli4_calc_ring is used.
12745 * This function returns number of iocbs it failed to abort.
12746 * This function is called with no locks held.
12747 **/
12748 int
12749 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12750 lpfc_ctx_cmd abort_cmd)
12751 {
12752 struct lpfc_hba *phba = vport->phba;
12753 struct lpfc_sli_ring *pring = NULL;
12754 struct lpfc_iocbq *iocbq;
12755 int errcnt = 0, ret_val = 0;
12756 unsigned long iflags;
12757 int i;
12758
12759 /* all I/Os are in process of being flushed */
12760 if (phba->hba_flag & HBA_IOQ_FLUSH)
12761 return errcnt;
12762
12763 for (i = 1; i <= phba->sli.last_iotag; i++) {
12764 iocbq = phba->sli.iocbq_lookup[i];
12765
12766 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12767 continue;
12768
12769 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12770 abort_cmd) != 0)
12771 continue;
12772
12773 spin_lock_irqsave(&phba->hbalock, iflags);
12774 if (phba->sli_rev == LPFC_SLI_REV3) {
12775 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12776 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12777 pring = lpfc_sli4_calc_ring(phba, iocbq);
12778 }
12779 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12780 lpfc_sli_abort_fcp_cmpl);
12781 spin_unlock_irqrestore(&phba->hbalock, iflags);
12782 if (ret_val != IOCB_SUCCESS)
12783 errcnt++;
12784 }
12785
12786 return errcnt;
12787 }
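
/*
 * Illustrative call (a sketch of SCSI error-handler style usage; "cmnd" is
 * a hypothetical struct scsi_cmnd pointer, and lun_id is ignored for
 * LPFC_CTX_TGT): abort everything outstanding to one target and learn how
 * many aborts could not be issued:
 *
 *	int failed = lpfc_sli_abort_iocb(vport, cmnd->device->id, 0,
 *					 LPFC_CTX_TGT);
 */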
12788
12789 /**
12790 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12791 * @vport: Pointer to virtual port.
12792 * @pring: Pointer to driver SLI ring object.
12793 * @tgt_id: SCSI ID of the target.
12794 * @lun_id: LUN ID of the scsi device.
12795 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12796 *
12797 * This function sends an abort command for every SCSI command
12798 * associated with the given virtual port pending on the ring
12799 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12800 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12801 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12802 * followed by lpfc_sli_validate_fcp_iocb.
12803 *
12804 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12805 * FCP iocbs associated with lun specified by tgt_id and lun_id
12806 * parameters
12807 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12808 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12809 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12810 * FCP iocbs associated with virtual port.
12811  * This function returns the number of iocbs it aborted.
12812 * This function is called with no locks held right after a taskmgmt
12813 * command is sent.
12814 **/
12815 int
12816 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12817 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12818 {
12819 struct lpfc_hba *phba = vport->phba;
12820 struct lpfc_io_buf *lpfc_cmd;
12821 struct lpfc_iocbq *abtsiocbq;
12822 struct lpfc_nodelist *ndlp = NULL;
12823 struct lpfc_iocbq *iocbq;
12824 int sum, i, ret_val;
12825 unsigned long iflags;
12826 struct lpfc_sli_ring *pring_s4 = NULL;
12827 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12828 bool ia;
12829
12830 spin_lock_irqsave(&phba->hbalock, iflags);
12831
12832 /* all I/Os are in process of being flushed */
12833 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12834 spin_unlock_irqrestore(&phba->hbalock, iflags);
12835 return 0;
12836 }
12837 sum = 0;
12838
12839 for (i = 1; i <= phba->sli.last_iotag; i++) {
12840 iocbq = phba->sli.iocbq_lookup[i];
12841
12842 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12843 continue;
12844
12845 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12846 cmd) != 0)
12847 continue;
12848
12849 /* Guard against IO completion being called at same time */
12850 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12851 spin_lock(&lpfc_cmd->buf_lock);
12852
12853 if (!lpfc_cmd->pCmd) {
12854 spin_unlock(&lpfc_cmd->buf_lock);
12855 continue;
12856 }
12857
12858 if (phba->sli_rev == LPFC_SLI_REV4) {
12859 pring_s4 =
12860 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12861 if (!pring_s4) {
12862 spin_unlock(&lpfc_cmd->buf_lock);
12863 continue;
12864 }
12865 /* Note: both hbalock and ring_lock must be set here */
12866 spin_lock(&pring_s4->ring_lock);
12867 }
12868
12869 /*
12870 * If the iocbq is already being aborted, don't take a second
12871 		 * action; just skip it.
12872 */
12873 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12874 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12875 if (phba->sli_rev == LPFC_SLI_REV4)
12876 spin_unlock(&pring_s4->ring_lock);
12877 spin_unlock(&lpfc_cmd->buf_lock);
12878 continue;
12879 }
12880
12881 /* issue ABTS for this IOCB based on iotag */
12882 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12883 if (!abtsiocbq) {
12884 if (phba->sli_rev == LPFC_SLI_REV4)
12885 spin_unlock(&pring_s4->ring_lock);
12886 spin_unlock(&lpfc_cmd->buf_lock);
12887 continue;
12888 }
12889
12890 if (phba->sli_rev == LPFC_SLI_REV4) {
12891 iotag = abtsiocbq->iotag;
12892 ulp_context = iocbq->sli4_xritag;
12893 cqid = lpfc_cmd->hdwq->io_cq_map;
12894 } else {
12895 iotag = iocbq->iocb.ulpIoTag;
12896 if (pring->ringno == LPFC_ELS_RING) {
12897 ndlp = iocbq->ndlp;
12898 ulp_context = ndlp->nlp_rpi;
12899 } else {
12900 ulp_context = iocbq->iocb.ulpContext;
12901 }
12902 }
12903
12904 ndlp = lpfc_cmd->rdata->pnode;
12905
12906 if (lpfc_is_link_up(phba) &&
12907 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12908 !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12909 ia = false;
12910 else
12911 ia = true;
12912
12913 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12914 iocbq->iocb.ulpClass, cqid,
12915 ia, false);
12916
12917 abtsiocbq->vport = vport;
12918
12919 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12920 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12921 if (iocbq->cmd_flag & LPFC_IO_FCP)
12922 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12923 if (iocbq->cmd_flag & LPFC_IO_FOF)
12924 abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12925
12926 /* Setup callback routine and issue the command. */
12927 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12928
12929 /*
12930 * Indicate the IO is being aborted by the driver and set
12931 * the caller's flag into the aborted IO.
12932 */
12933 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12934
12935 if (phba->sli_rev == LPFC_SLI_REV4) {
12936 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12937 abtsiocbq, 0);
12938 spin_unlock(&pring_s4->ring_lock);
12939 } else {
12940 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12941 abtsiocbq, 0);
12942 }
12943
12944 spin_unlock(&lpfc_cmd->buf_lock);
12945
12946 if (ret_val == IOCB_ERROR)
12947 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12948 else
12949 sum++;
12950 }
12951 spin_unlock_irqrestore(&phba->hbalock, iflags);
12952 return sum;
12953 }
12954
12955 /**
12956 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12957 * @phba: Pointer to HBA context object.
12958 * @cmdiocbq: Pointer to command iocb.
12959 * @rspiocbq: Pointer to response iocb.
12960 *
12961 * This function is the completion handler for iocbs issued using
12962 * lpfc_sli_issue_iocb_wait function. This function is called by the
12963 * ring event handler function without any lock held. This function
12964 * can be called from both worker thread context and interrupt
12965 * context. This function also can be called from other thread which
12966 * cleans up the SLI layer objects.
12967  * This function copies the contents of the response iocb to the
12968 * response iocb memory object provided by the caller of
12969 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12970 * sleeps for the iocb completion.
12971 **/
12972 static void
12973 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12974 struct lpfc_iocbq *cmdiocbq,
12975 struct lpfc_iocbq *rspiocbq)
12976 {
12977 wait_queue_head_t *pdone_q;
12978 unsigned long iflags;
12979 struct lpfc_io_buf *lpfc_cmd;
12980 size_t offset = offsetof(struct lpfc_iocbq, wqe);
12981
12982 spin_lock_irqsave(&phba->hbalock, iflags);
12983 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12984
12985 /*
12986 * A time out has occurred for the iocb. If a time out
12987 * completion handler has been supplied, call it. Otherwise,
12988 * just free the iocbq.
12989 */
12990
12991 spin_unlock_irqrestore(&phba->hbalock, iflags);
12992 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12993 cmdiocbq->wait_cmd_cmpl = NULL;
12994 if (cmdiocbq->cmd_cmpl)
12995 cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
12996 else
12997 lpfc_sli_release_iocbq(phba, cmdiocbq);
12998 return;
12999 }
13000
13001 /* Copy the contents of the local rspiocb into the caller's buffer. */
13002 cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13003 if (cmdiocbq->rsp_iocb && rspiocbq)
13004 memcpy((char *)cmdiocbq->rsp_iocb + offset,
13005 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13006
13007 /* Set the exchange busy flag for task management commands */
13008 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13009 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13010 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13011 cur_iocbq);
13012 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13013 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13014 else
13015 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13016 }
13017
13018 pdone_q = cmdiocbq->context_un.wait_queue;
13019 if (pdone_q)
13020 wake_up(pdone_q);
13021 spin_unlock_irqrestore(&phba->hbalock, iflags);
13022 return;
13023 }
13024
13025 /**
13026 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13027  * @phba: Pointer to HBA context object.
13028 * @piocbq: Pointer to command iocb.
13029 * @flag: Flag to test.
13030 *
13031  * This routine grabs the hbalock and then tests the cmd_flag to
13032 * see if the passed in flag is set.
13033 * Returns:
13034 * 1 if flag is set.
13035 * 0 if flag is not set.
13036 **/
13037 static int
13038 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13039 struct lpfc_iocbq *piocbq, uint32_t flag)
13040 {
13041 unsigned long iflags;
13042 int ret;
13043
13044 spin_lock_irqsave(&phba->hbalock, iflags);
13045 ret = piocbq->cmd_flag & flag;
13046 spin_unlock_irqrestore(&phba->hbalock, iflags);
13047 return ret;
13048
13049 }
13050
13051 /**
13052 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13053  * @phba: Pointer to HBA context object.
13054 * @ring_number: Ring number
13055 * @piocb: Pointer to command iocb.
13056 * @prspiocbq: Pointer to response iocb.
13057 * @timeout: Timeout in number of seconds.
13058 *
13059 * This function issues the iocb to firmware and waits for the
13060  * iocb to complete. The cmd_cmpl field of the iocb shall be used
13061 * to handle iocbs which time out. If the field is NULL, the
13062 * function shall free the iocbq structure. If more clean up is
13063 * needed, the caller is expected to provide a completion function
13064 * that will provide the needed clean up. If the iocb command is
13065 * not completed within timeout seconds, the function will either
13066 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13067 * completion function set in the cmd_cmpl field and then return
13068 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
13069 * resources if this function returns IOCB_TIMEDOUT.
13070  * The function waits for the iocb completion using a
13071  * non-interruptible wait.
13072  * This function will sleep while waiting for iocb completion.
13073  * So, this function should not be called from any context which
13074  * does not allow sleeping. For the same reason, this function
13075  * cannot be called with interrupts disabled.
13076  * This function assumes that the iocb completions occur while
13077  * this function sleeps. So, this function cannot be called from
13078  * the thread which processes iocb completion for this ring.
13079 * This function clears the cmd_flag of the iocb object before
13080 * issuing the iocb and the iocb completion handler sets this
13081 * flag and wakes this thread when the iocb completes.
13082 * The contents of the response iocb will be copied to prspiocbq
13083 * by the completion handler when the command completes.
13084 * This function returns IOCB_SUCCESS when success.
13085 * This function is called with no lock held.
13086 **/
13087 int
13088 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13089 uint32_t ring_number,
13090 struct lpfc_iocbq *piocb,
13091 struct lpfc_iocbq *prspiocbq,
13092 uint32_t timeout)
13093 {
13094 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13095 long timeleft, timeout_req = 0;
13096 int retval = IOCB_SUCCESS;
13097 uint32_t creg_val;
13098 struct lpfc_iocbq *iocb;
13099 int txq_cnt = 0;
13100 int txcmplq_cnt = 0;
13101 struct lpfc_sli_ring *pring;
13102 unsigned long iflags;
13103 bool iocb_completed = true;
13104
13105 if (phba->sli_rev >= LPFC_SLI_REV4) {
13106 lpfc_sli_prep_wqe(phba, piocb);
13107
13108 pring = lpfc_sli4_calc_ring(phba, piocb);
13109 } else
13110 pring = &phba->sli.sli3_ring[ring_number];
13111 /*
13112 * If the caller has provided a response iocbq buffer, then rsp_iocb
13113 	 * must be NULL, or it is an error.
13114 */
13115 if (prspiocbq) {
13116 if (piocb->rsp_iocb)
13117 return IOCB_ERROR;
13118 piocb->rsp_iocb = prspiocbq;
13119 }
13120
13121 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13122 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13123 piocb->context_un.wait_queue = &done_q;
13124 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13125
13126 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13127 if (lpfc_readl(phba->HCregaddr, &creg_val))
13128 return IOCB_ERROR;
13129 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13130 writel(creg_val, phba->HCregaddr);
13131 readl(phba->HCregaddr); /* flush */
13132 }
13133
13134 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13135 SLI_IOCB_RET_IOCB);
13136 if (retval == IOCB_SUCCESS) {
13137 timeout_req = msecs_to_jiffies(timeout * 1000);
13138 timeleft = wait_event_timeout(done_q,
13139 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13140 timeout_req);
13141 spin_lock_irqsave(&phba->hbalock, iflags);
13142 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13143
13144 /*
13145 * IOCB timed out. Inform the wake iocb wait
13146 * completion function and set local status
13147 */
13148
13149 iocb_completed = false;
13150 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13151 }
13152 spin_unlock_irqrestore(&phba->hbalock, iflags);
13153 if (iocb_completed) {
13154 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13155 "0331 IOCB wake signaled\n");
13156 /* Note: we are not indicating if the IOCB has a success
13157 * status or not - that's for the caller to check.
13158 * IOCB_SUCCESS means just that the command was sent and
13159 * completed. Not that it completed successfully.
13160 */
13161 } else if (timeleft == 0) {
13162 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13163 "0338 IOCB wait timeout error - no "
13164 "wake response Data x%x\n", timeout);
13165 retval = IOCB_TIMEDOUT;
13166 } else {
13167 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13168 "0330 IOCB wake NOT set, "
13169 "Data x%x x%lx\n",
13170 timeout, (timeleft / jiffies));
13171 retval = IOCB_TIMEDOUT;
13172 }
13173 } else if (retval == IOCB_BUSY) {
13174 if (phba->cfg_log_verbose & LOG_SLI) {
13175 list_for_each_entry(iocb, &pring->txq, list) {
13176 txq_cnt++;
13177 }
13178 list_for_each_entry(iocb, &pring->txcmplq, list) {
13179 txcmplq_cnt++;
13180 }
13181 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13182 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13183 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13184 }
13185 return retval;
13186 } else {
13187 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13188 "0332 IOCB wait issue failed, Data x%x\n",
13189 retval);
13190 retval = IOCB_ERROR;
13191 }
13192
13193 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13194 if (lpfc_readl(phba->HCregaddr, &creg_val))
13195 return IOCB_ERROR;
13196 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13197 writel(creg_val, phba->HCregaddr);
13198 readl(phba->HCregaddr); /* flush */
13199 }
13200
13201 if (prspiocbq)
13202 piocb->rsp_iocb = NULL;
13203
13204 piocb->context_un.wait_queue = NULL;
13205 piocb->cmd_cmpl = NULL;
13206 return retval;
13207 }
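/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * issuing an ELS command synchronously. The request setup step is an
 * assumption for illustration; real callers build the command iocb
 * with the appropriate lpfc_sli_prep_* helper first.
 *
 *	struct lpfc_iocbq *cmdiocbq = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	if (!cmdiocbq || !rspiocbq)
 *		goto out_free;
 *	... fill in cmdiocbq with the request ...
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, phba->fc_ratov * 2);
 *	if (rc == IOCB_SUCCESS)
 *		... inspect rspiocbq; IOCB_SUCCESS only means the
 *		    command was sent and completed ...
 *	else if (rc == IOCB_TIMEDOUT)
 *		... do NOT free cmdiocbq here; cleanup is owned by
 *		    the completion path, per the notes above ...
 */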
13208
13209 /**
13210 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13211 * @phba: Pointer to HBA context object.
13212 * @pmboxq: Pointer to driver mailbox object.
13213 * @timeout: Timeout in number of seconds.
13214 *
13215 * This function issues the mailbox to firmware and waits for the
13216 * mailbox command to complete. If the mailbox command is not
13217 * completed within timeout seconds, it returns MBX_TIMEOUT.
13218  * The function waits for the mailbox completion using a
13219  * non-interruptible wait. If the wait times out before the
13220  * mailbox completes, MBX_TIMEOUT is returned to the caller. The
13221  * caller should not free the mailbox resources if this function
13222  * returns MBX_TIMEOUT.
13223 * This function will sleep while waiting for mailbox completion.
13224 * So, this function should not be called from any context which
13225 * does not allow sleeping. Due to the same reason, this function
13226 * cannot be called with interrupt disabled.
13227  * This function assumes that the mailbox completion occurs while
13228  * this function sleeps. So, this function cannot be called from
13229  * the worker thread which processes mailbox completions.
13230 * This function is called in the context of HBA management
13231 * applications.
13232 * This function returns MBX_SUCCESS when successful.
13233 * This function is called with no lock held.
13234 **/
13235 int
13236 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13237 uint32_t timeout)
13238 {
13239 struct completion mbox_done;
13240 int retval;
13241 unsigned long flag;
13242
13243 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13244 /* Set up the wake routine as the mailbox completion callback */
13245 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13246
13247 /* setup context3 field to pass wait_queue pointer to wake function */
13248 init_completion(&mbox_done);
13249 pmboxq->context3 = &mbox_done;
13250 /* now issue the command */
13251 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13252 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13253 wait_for_completion_timeout(&mbox_done,
13254 msecs_to_jiffies(timeout * 1000));
13255
13256 spin_lock_irqsave(&phba->hbalock, flag);
13257 pmboxq->context3 = NULL;
13258 /*
13259 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
13260 * otherwise the resources must not be freed yet.
13261 */
13262 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13263 retval = MBX_SUCCESS;
13264 } else {
13265 retval = MBX_TIMEOUT;
13266 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13267 }
13268 spin_unlock_irqrestore(&phba->hbalock, flag);
13269 }
13270 return retval;
13271 }
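/*
 * Usage sketch (illustrative only): a synchronous mailbox round trip.
 * The command setup is elided because it depends on the mailbox type;
 * the allocation/ownership pattern below is the part this function
 * defines.
 *
 *	LPFC_MBOXQ_t *mboxq;
 *	int rc;
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	... set up the mailbox command in mboxq->u.mb ...
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_TIMEOUT)
 *		return -ETIMEDOUT;  (mboxq now owned by the cmpl handler)
 *	... consume the results, then ...
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 */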
13272
13273 /**
13274 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13275 * @phba: Pointer to HBA context.
13276 * @mbx_action: Mailbox shutdown options.
13277 *
13278 * This function is called to shutdown the driver's mailbox sub-system.
13279 * It first marks the mailbox sub-system as blocked to prevent
13280 * asynchronous mailbox commands from being issued off the pending
13281 * mailbox command queue. If the mailbox command sub-system shutdown is due to
13282 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13283 * the mailbox sub-system flush routine to forcefully bring down the
13284 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13285 * as with offline or HBA function reset), this routine will wait for the
13286 * outstanding mailbox command to complete before invoking the mailbox
13287 * sub-system flush routine to gracefully bring down mailbox sub-system.
13288 **/
13289 void
13290 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13291 {
13292 struct lpfc_sli *psli = &phba->sli;
13293 unsigned long timeout;
13294
13295 if (mbx_action == LPFC_MBX_NO_WAIT) {
13296 /* delay 100ms for port state */
13297 msleep(100);
13298 lpfc_sli_mbox_sys_flush(phba);
13299 return;
13300 }
13301 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13302
13303 /* Disable softirqs, including timers from obtaining phba->hbalock */
13304 local_bh_disable();
13305
13306 spin_lock_irq(&phba->hbalock);
13307 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13308
13309 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13310 /* Determine how long we might wait for the active mailbox
13311 * command to be gracefully completed by firmware.
13312 */
13313 if (phba->sli.mbox_active)
13314 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13315 phba->sli.mbox_active) *
13316 1000) + jiffies;
13317 spin_unlock_irq(&phba->hbalock);
13318
13319 /* Enable softirqs again, done with phba->hbalock */
13320 local_bh_enable();
13321
13322 while (phba->sli.mbox_active) {
13323 /* Check active mailbox complete status every 2ms */
13324 msleep(2);
13325 if (time_after(jiffies, timeout))
13326 /* Timeout: let the mailbox flush routine
13327 * forcefully release the active mailbox command
13328 */
13329 break;
13330 }
13331 } else {
13332 spin_unlock_irq(&phba->hbalock);
13333
13334 /* Enable softirqs again, done with phba->hbalock */
13335 local_bh_enable();
13336 }
13337
13338 lpfc_sli_mbox_sys_flush(phba);
13339 }
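/*
 * The graceful path above is the standard jiffies-deadline poll. A
 * minimal sketch of the same pattern (condition_met() is a
 * hypothetical predicate, named here only for illustration):
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);
 *
 *	while (!condition_met()) {
 *		msleep(2);
 *		if (time_after(jiffies, deadline))
 *			break;	(give up and fall back to a flush)
 *	}
 */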
13340
13341 /**
13342 * lpfc_sli_eratt_read - read sli-3 error attention events
13343 * @phba: Pointer to HBA context.
13344 *
13345 * This function is called to read the SLI3 device error attention registers
13346 * for possible error attention events. The caller must hold the hostlock
13347 * with spin_lock_irq().
13348 *
13349 * This function returns 1 when there is Error Attention in the Host Attention
13350 * Register and returns 0 otherwise.
13351 **/
13352 static int
13353 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13354 {
13355 uint32_t ha_copy;
13356
13357 /* Read chip Host Attention (HA) register */
13358 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13359 goto unplug_err;
13360
13361 if (ha_copy & HA_ERATT) {
13362 /* Read host status register to retrieve error event */
13363 if (lpfc_sli_read_hs(phba))
13364 goto unplug_err;
13365
13366 /* Check if a deferred error condition is active */
13367 if ((HS_FFER1 & phba->work_hs) &&
13368 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13369 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13370 phba->hba_flag |= DEFER_ERATT;
13371 /* Clear all interrupt enable conditions */
13372 writel(0, phba->HCregaddr);
13373 readl(phba->HCregaddr);
13374 }
13375
13376 /* Set the driver HA work bitmap */
13377 phba->work_ha |= HA_ERATT;
13378 /* Indicate polling handles this ERATT */
13379 phba->hba_flag |= HBA_ERATT_HANDLED;
13380 return 1;
13381 }
13382 return 0;
13383
13384 unplug_err:
13385 /* Set the driver HS work bitmap */
13386 phba->work_hs |= UNPLUG_ERR;
13387 /* Set the driver HA work bitmap */
13388 phba->work_ha |= HA_ERATT;
13389 /* Indicate polling handles this ERATT */
13390 phba->hba_flag |= HBA_ERATT_HANDLED;
13391 return 1;
13392 }
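/*
 * The deferred-error test above reads: HS_FFER1 must be set together
 * with at least one of HS_FFER2..HS_FFER8. Equivalent sketch, using a
 * hypothetical helper mask for clarity:
 *
 *	#define HS_FFER_EXTRA	(HS_FFER2 | HS_FFER3 | HS_FFER4 | \
 *				 HS_FFER5 | HS_FFER6 | HS_FFER7 | HS_FFER8)
 *
 *	bool deferred = (phba->work_hs & HS_FFER1) &&
 *			(phba->work_hs & HS_FFER_EXTRA);
 */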
13393
13394 /**
13395 * lpfc_sli4_eratt_read - read sli-4 error attention events
13396 * @phba: Pointer to HBA context.
13397 *
13398 * This function is called to read the SLI4 device error attention registers
13399 * for possible error attention events. The caller must hold the hostlock
13400 * with spin_lock_irq().
13401 *
13402 * This function returns 1 when there is Error Attention in the Host Attention
13403 * Register and returns 0 otherwise.
13404 **/
13405 static int
13406 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13407 {
13408 uint32_t uerr_sta_hi, uerr_sta_lo;
13409 uint32_t if_type, portsmphr;
13410 struct lpfc_register portstat_reg;
13411 u32 logmask;
13412
13413 /*
13414 * For now, use the SLI4 device internal unrecoverable error
13415 * registers for error attention. This can be changed later.
13416 */
13417 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13418 switch (if_type) {
13419 case LPFC_SLI_INTF_IF_TYPE_0:
13420 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13421 &uerr_sta_lo) ||
13422 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13423 &uerr_sta_hi)) {
13424 phba->work_hs |= UNPLUG_ERR;
13425 phba->work_ha |= HA_ERATT;
13426 phba->hba_flag |= HBA_ERATT_HANDLED;
13427 return 1;
13428 }
13429 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13430 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13432 "1423 HBA Unrecoverable error: "
13433 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13434 "ue_mask_lo_reg=0x%x, "
13435 "ue_mask_hi_reg=0x%x\n",
13436 uerr_sta_lo, uerr_sta_hi,
13437 phba->sli4_hba.ue_mask_lo,
13438 phba->sli4_hba.ue_mask_hi);
13439 phba->work_status[0] = uerr_sta_lo;
13440 phba->work_status[1] = uerr_sta_hi;
13441 phba->work_ha |= HA_ERATT;
13442 phba->hba_flag |= HBA_ERATT_HANDLED;
13443 return 1;
13444 }
13445 break;
13446 case LPFC_SLI_INTF_IF_TYPE_2:
13447 case LPFC_SLI_INTF_IF_TYPE_6:
13448 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13449 &portstat_reg.word0) ||
13450 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13451 &portsmphr)){
13452 phba->work_hs |= UNPLUG_ERR;
13453 phba->work_ha |= HA_ERATT;
13454 phba->hba_flag |= HBA_ERATT_HANDLED;
13455 return 1;
13456 }
13457 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13458 phba->work_status[0] =
13459 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13460 phba->work_status[1] =
13461 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13462 logmask = LOG_TRACE_EVENT;
13463 if (phba->work_status[0] ==
13464 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13465 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13466 logmask = LOG_SLI;
13467 lpfc_printf_log(phba, KERN_ERR, logmask,
13468 "2885 Port Status Event: "
13469 "port status reg 0x%x, "
13470 "port smphr reg 0x%x, "
13471 "error 1=0x%x, error 2=0x%x\n",
13472 portstat_reg.word0,
13473 portsmphr,
13474 phba->work_status[0],
13475 phba->work_status[1]);
13476 phba->work_ha |= HA_ERATT;
13477 phba->hba_flag |= HBA_ERATT_HANDLED;
13478 return 1;
13479 }
13480 break;
13481 case LPFC_SLI_INTF_IF_TYPE_1:
13482 default:
13483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13484 "2886 HBA Error Attention on unsupported "
13485 "if type %d.", if_type);
13486 return 1;
13487 }
13488
13489 return 0;
13490 }
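/*
 * Note on the bf_get() accessors used above: a register field is
 * described by <name>_SHIFT, <name>_MASK and <name>_WORD constants,
 * so (rough sketch of the macro's expansion, see lpfc_hw4.h):
 *
 *	bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf)
 *
 * is approximately
 *
 *	((phba->sli4_hba.sli_intf.word0 >> lpfc_sli_intf_if_type_SHIFT) &
 *	 lpfc_sli_intf_if_type_MASK)
 */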
13491
13492 /**
13493 * lpfc_sli_check_eratt - check error attention events
13494 * @phba: Pointer to HBA context.
13495 *
13496 * This function is called from timer soft interrupt context to check HBA's
13497 * error attention register bit for error attention events.
13498 *
13499 * This function returns 1 when there is Error Attention in the Host Attention
13500 * Register and returns 0 otherwise.
13501 **/
13502 int
13503 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13504 {
13505 uint32_t ha_copy;
13506
13507 /* If somebody is waiting to handle an eratt, don't process it
13508 * here. The brdkill function will do this.
13509 */
13510 if (phba->link_flag & LS_IGNORE_ERATT)
13511 return 0;
13512
13513 /* Check if interrupt handler handles this ERATT */
13514 spin_lock_irq(&phba->hbalock);
13515 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13516 /* Interrupt handler has handled ERATT */
13517 spin_unlock_irq(&phba->hbalock);
13518 return 0;
13519 }
13520
13521 /*
13522 * If there is deferred error attention, do not check for error
13523 * attention
13524 */
13525 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13526 spin_unlock_irq(&phba->hbalock);
13527 return 0;
13528 }
13529
13530 /* If PCI channel is offline, don't process it */
13531 if (unlikely(pci_channel_offline(phba->pcidev))) {
13532 spin_unlock_irq(&phba->hbalock);
13533 return 0;
13534 }
13535
13536 switch (phba->sli_rev) {
13537 case LPFC_SLI_REV2:
13538 case LPFC_SLI_REV3:
13539 /* Read chip Host Attention (HA) register */
13540 ha_copy = lpfc_sli_eratt_read(phba);
13541 break;
13542 case LPFC_SLI_REV4:
13543 /* Read device Unrecoverable Error (UERR) registers */
13544 ha_copy = lpfc_sli4_eratt_read(phba);
13545 break;
13546 default:
13547 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13548 "0299 Invalid SLI revision (%d)\n",
13549 phba->sli_rev);
13550 ha_copy = 0;
13551 break;
13552 }
13553 spin_unlock_irq(&phba->hbalock);
13554
13555 return ha_copy;
13556 }
13557
13558 /**
13559 * lpfc_intr_state_check - Check device state for interrupt handling
13560 * @phba: Pointer to HBA context.
13561 *
13562 * This inline routine checks whether a device or its PCI slot is in a
13563 * state in which the interrupt should be handled.
13564 *
13565 * This function returns 0 if the device or the PCI slot is in a state in
13566 * which the interrupt should be handled, otherwise -EIO.
13567 */
13568 static inline int
13569 lpfc_intr_state_check(struct lpfc_hba *phba)
13570 {
13571 /* If the pci channel is offline, ignore all the interrupts */
13572 if (unlikely(pci_channel_offline(phba->pcidev)))
13573 return -EIO;
13574
13575 /* Update device level interrupt statistics */
13576 phba->sli.slistat.sli_intr++;
13577
13578 /* Ignore all interrupts during initialization. */
13579 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13580 return -EIO;
13581
13582 return 0;
13583 }
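/*
 * All three SLI-3 interrupt handlers below open with this gate. The
 * calling pattern, sketched with a hypothetical handler name:
 *
 *	irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		struct lpfc_hba *phba = dev_id;
 *
 *		if (lpfc_intr_state_check(phba))
 *			return IRQ_NONE;
 *		... service the attention sources ...
 *		return IRQ_HANDLED;
 *	}
 */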
13584
13585 /**
13586 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13587 * @irq: Interrupt number.
13588 * @dev_id: The device context pointer.
13589 *
13590 * This function is directly called from the PCI layer as an interrupt
13591 * service routine when device with SLI-3 interface spec is enabled with
13592 * MSI-X multi-message interrupt mode and there are slow-path events in
13593 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13594 * interrupt mode, this function is called as part of the device-level
13595 * interrupt handler. When the PCI slot is in error recovery or the HBA
13596 * is undergoing initialization, the interrupt handler will not process
13597 * the interrupt. The link attention and ELS ring attention events are
13598 * handled by the worker thread. The interrupt handler signals the worker
13599 * thread and returns for these events. This function is called without
13600 * any lock held. It gets the hbalock to access and update SLI data
13601 * structures.
13602 *
13603 * This function returns IRQ_HANDLED when interrupt is handled else it
13604 * returns IRQ_NONE.
13605 **/
13606 irqreturn_t
13607 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13608 {
13609 struct lpfc_hba *phba;
13610 uint32_t ha_copy, hc_copy;
13611 uint32_t work_ha_copy;
13612 unsigned long status;
13613 unsigned long iflag;
13614 uint32_t control;
13615
13616 MAILBOX_t *mbox, *pmbox;
13617 struct lpfc_vport *vport;
13618 struct lpfc_nodelist *ndlp;
13619 struct lpfc_dmabuf *mp;
13620 LPFC_MBOXQ_t *pmb;
13621 int rc;
13622
13623 /*
13624 * Get the driver's phba structure from the dev_id and
13625 * assume the HBA is not interrupting.
13626 */
13627 phba = (struct lpfc_hba *)dev_id;
13628
13629 if (unlikely(!phba))
13630 return IRQ_NONE;
13631
13632 /*
13633 * Extra handling is needed when this function is invoked as an
13634 * individual interrupt handler in MSI-X multi-message interrupt mode.
13635 */
13636 if (phba->intr_type == MSIX) {
13637 /* Check device state for handling interrupt */
13638 if (lpfc_intr_state_check(phba))
13639 return IRQ_NONE;
13640 /* Need to read HA REG for slow-path events */
13641 spin_lock_irqsave(&phba->hbalock, iflag);
13642 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13643 goto unplug_error;
13644 /* If somebody is waiting to handle an eratt don't process it
13645 * here. The brdkill function will do this.
13646 */
13647 if (phba->link_flag & LS_IGNORE_ERATT)
13648 ha_copy &= ~HA_ERATT;
13649 /* Check the need for handling ERATT in interrupt handler */
13650 if (ha_copy & HA_ERATT) {
13651 if (phba->hba_flag & HBA_ERATT_HANDLED)
13652 /* ERATT polling has handled ERATT */
13653 ha_copy &= ~HA_ERATT;
13654 else
13655 /* Indicate interrupt handler handles ERATT */
13656 phba->hba_flag |= HBA_ERATT_HANDLED;
13657 }
13658
13659 /*
13660 * If there is deferred error attention, do not check for any
13661 * interrupt.
13662 */
13663 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13664 spin_unlock_irqrestore(&phba->hbalock, iflag);
13665 return IRQ_NONE;
13666 }
13667
13668 /* Clear up only attention source related to slow-path */
13669 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13670 goto unplug_error;
13671
13672 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13673 HC_LAINT_ENA | HC_ERINT_ENA),
13674 phba->HCregaddr);
13675 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13676 phba->HAregaddr);
13677 writel(hc_copy, phba->HCregaddr);
13678 readl(phba->HAregaddr); /* flush */
13679 spin_unlock_irqrestore(&phba->hbalock, iflag);
13680 } else
13681 ha_copy = phba->ha_copy;
13682
13683 work_ha_copy = ha_copy & phba->work_ha_mask;
13684
13685 if (work_ha_copy) {
13686 if (work_ha_copy & HA_LATT) {
13687 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13688 /*
13689 * Turn off Link Attention interrupts
13690 * until CLEAR_LA done
13691 */
13692 spin_lock_irqsave(&phba->hbalock, iflag);
13693 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13694 if (lpfc_readl(phba->HCregaddr, &control))
13695 goto unplug_error;
13696 control &= ~HC_LAINT_ENA;
13697 writel(control, phba->HCregaddr);
13698 readl(phba->HCregaddr); /* flush */
13699 spin_unlock_irqrestore(&phba->hbalock, iflag);
13700 } else
13702 work_ha_copy &= ~HA_LATT;
13703 }
13704
13705 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13706 /*
13707 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13708 * the only slow ring.
13709 */
13710 status = (work_ha_copy &
13711 (HA_RXMASK << (4*LPFC_ELS_RING)));
13712 status >>= (4*LPFC_ELS_RING);
13713 if (status & HA_RXMASK) {
13714 spin_lock_irqsave(&phba->hbalock, iflag);
13715 if (lpfc_readl(phba->HCregaddr, &control))
13716 goto unplug_error;
13717
13718 lpfc_debugfs_slow_ring_trc(phba,
13719 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13720 control, status,
13721 (uint32_t)phba->sli.slistat.sli_intr);
13722
13723 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13724 lpfc_debugfs_slow_ring_trc(phba,
13725 "ISR Disable ring:"
13726 "pwork:x%x hawork:x%x wait:x%x",
13727 phba->work_ha, work_ha_copy,
13728 (uint32_t)((unsigned long)
13729 &phba->work_waitq));
13730
13731 control &=
13732 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13733 writel(control, phba->HCregaddr);
13734 readl(phba->HCregaddr); /* flush */
13735 } else {
13737 lpfc_debugfs_slow_ring_trc(phba,
13738 "ISR slow ring: pwork:"
13739 "x%x hawork:x%x wait:x%x",
13740 phba->work_ha, work_ha_copy,
13741 (uint32_t)((unsigned long)
13742 &phba->work_waitq));
13743 }
13744 spin_unlock_irqrestore(&phba->hbalock, iflag);
13745 }
13746 }
13747 spin_lock_irqsave(&phba->hbalock, iflag);
13748 if (work_ha_copy & HA_ERATT) {
13749 if (lpfc_sli_read_hs(phba))
13750 goto unplug_error;
13751 /*
13752 * Check if a deferred error condition
13753 * is active
13754 */
13755 if ((HS_FFER1 & phba->work_hs) &&
13756 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13757 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13758 phba->work_hs)) {
13759 phba->hba_flag |= DEFER_ERATT;
13760 /* Clear all interrupt enable conditions */
13761 writel(0, phba->HCregaddr);
13762 readl(phba->HCregaddr);
13763 }
13764 }
13765
13766 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13767 pmb = phba->sli.mbox_active;
13768 pmbox = &pmb->u.mb;
13769 mbox = phba->mbox;
13770 vport = pmb->vport;
13771
13772 /* First check out the status word */
13773 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13774 if (pmbox->mbxOwner != OWN_HOST) {
13775 spin_unlock_irqrestore(&phba->hbalock, iflag);
13776 /*
13777 * Stray Mailbox Interrupt, mbxCommand <cmd>
13778 * mbxStatus <status>
13779 */
13780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13781 "(%d):0304 Stray Mailbox "
13782 "Interrupt mbxCommand x%x "
13783 "mbxStatus x%x\n",
13784 (vport ? vport->vpi : 0),
13785 pmbox->mbxCommand,
13786 pmbox->mbxStatus);
13787 /* clear mailbox attention bit */
13788 work_ha_copy &= ~HA_MBATT;
13789 } else {
13790 phba->sli.mbox_active = NULL;
13791 spin_unlock_irqrestore(&phba->hbalock, iflag);
13792 phba->last_completion_time = jiffies;
13793 del_timer(&phba->sli.mbox_tmo);
13794 if (pmb->mbox_cmpl) {
13795 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13796 MAILBOX_CMD_SIZE);
13797 if (pmb->out_ext_byte_len &&
13798 pmb->ctx_buf)
13799 lpfc_sli_pcimem_bcopy(
13800 phba->mbox_ext,
13801 pmb->ctx_buf,
13802 pmb->out_ext_byte_len);
13803 }
13804 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13805 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13806
13807 lpfc_debugfs_disc_trc(vport,
13808 LPFC_DISC_TRC_MBOX_VPORT,
13809 "MBOX dflt rpi: : "
13810 "status:x%x rpi:x%x",
13811 (uint32_t)pmbox->mbxStatus,
13812 pmbox->un.varWords[0], 0);
13813
13814 if (!pmbox->mbxStatus) {
13815 mp = (struct lpfc_dmabuf *)
13816 (pmb->ctx_buf);
13817 ndlp = (struct lpfc_nodelist *)
13818 pmb->ctx_ndlp;
13819
13820 /* Reg_LOGIN of dflt RPI was
13821 * successful. Now let's get
13822 * rid of the RPI using the
13823 * same mbox buffer.
13824 */
13825 lpfc_unreg_login(phba,
13826 vport->vpi,
13827 pmbox->un.varWords[0],
13828 pmb);
13829 pmb->mbox_cmpl =
13830 lpfc_mbx_cmpl_dflt_rpi;
13831 pmb->ctx_buf = mp;
13832 pmb->ctx_ndlp = ndlp;
13833 pmb->vport = vport;
13834 rc = lpfc_sli_issue_mbox(phba,
13835 pmb,
13836 MBX_NOWAIT);
13837 if (rc != MBX_BUSY)
13838 lpfc_printf_log(phba,
13839 KERN_ERR,
13840 LOG_TRACE_EVENT,
13841 "0350 rc should have"
13842 "been MBX_BUSY\n");
13843 if (rc != MBX_NOT_FINISHED)
13844 goto send_current_mbox;
13845 }
13846 }
13847 spin_lock_irqsave(
13848 &phba->pport->work_port_lock,
13849 iflag);
13850 phba->pport->work_port_events &=
13851 ~WORKER_MBOX_TMO;
13852 spin_unlock_irqrestore(
13853 &phba->pport->work_port_lock,
13854 iflag);
13855
13856 /* Do NOT queue MBX_HEARTBEAT to the worker
13857 * thread for processing.
13858 */
13859 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13860 /* Process mbox now */
13861 phba->sli.mbox_active = NULL;
13862 phba->sli.sli_flag &=
13863 ~LPFC_SLI_MBOX_ACTIVE;
13864 if (pmb->mbox_cmpl)
13865 pmb->mbox_cmpl(phba, pmb);
13866 } else {
13867 /* Queue to worker thread to process */
13868 lpfc_mbox_cmpl_put(phba, pmb);
13869 }
13870 }
13871 } else
13872 spin_unlock_irqrestore(&phba->hbalock, iflag);
13873
13874 if ((work_ha_copy & HA_MBATT) &&
13875 (phba->sli.mbox_active == NULL)) {
13876 send_current_mbox:
13877 /* Process next mailbox command if there is one */
13878 do {
13879 rc = lpfc_sli_issue_mbox(phba, NULL,
13880 MBX_NOWAIT);
13881 } while (rc == MBX_NOT_FINISHED);
13882 if (rc != MBX_SUCCESS)
13883 lpfc_printf_log(phba, KERN_ERR,
13884 LOG_TRACE_EVENT,
13885 "0349 rc should be "
13886 "MBX_SUCCESS\n");
13887 }
13888
13889 spin_lock_irqsave(&phba->hbalock, iflag);
13890 phba->work_ha |= work_ha_copy;
13891 spin_unlock_irqrestore(&phba->hbalock, iflag);
13892 lpfc_worker_wake_up(phba);
13893 }
13894 return IRQ_HANDLED;
13895 unplug_error:
13896 spin_unlock_irqrestore(&phba->hbalock, iflag);
13897 return IRQ_HANDLED;
13898
13899 } /* lpfc_sli_sp_intr_handler */
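/*
 * The HA register packs four attention bits per ring, one nibble per
 * ring number. Sketch of the recurring idiom for isolating a ring's
 * receive-attention bits (shown here for the ELS ring):
 *
 *	unsigned long ring_att;
 *
 *	ring_att = ha_copy & (HA_RXMASK << (4 * LPFC_ELS_RING));
 *	ring_att >>= 4 * LPFC_ELS_RING;
 *	if (ring_att & HA_RXMASK)
 *		... the ELS ring needs servicing ...
 */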
13900
13901 /**
13902 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13903 * @irq: Interrupt number.
13904 * @dev_id: The device context pointer.
13905 *
13906 * This function is directly called from the PCI layer as an interrupt
13907 * service routine when device with SLI-3 interface spec is enabled with
13908 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13909 * ring event in the HBA. However, when the device is enabled with either
13910 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13911 * device-level interrupt handler. When the PCI slot is in error recovery
13912 * or the HBA is undergoing initialization, the interrupt handler will not
13913 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13914 * the interrupt context. This function is called without any lock held.
13915 * It gets the hbalock to access and update SLI data structures.
13916 *
13917 * This function returns IRQ_HANDLED when interrupt is handled else it
13918 * returns IRQ_NONE.
13919 **/
13920 irqreturn_t
13921 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13922 {
13923 struct lpfc_hba *phba;
13924 uint32_t ha_copy;
13925 unsigned long status;
13926 unsigned long iflag;
13927 struct lpfc_sli_ring *pring;
13928
13929 /* Get the driver's phba structure from the dev_id and
13930 * assume the HBA is not interrupting.
13931 */
13932 phba = (struct lpfc_hba *) dev_id;
13933
13934 if (unlikely(!phba))
13935 return IRQ_NONE;
13936
13937 /*
13938 * Extra handling is needed when this function is invoked as an
13939 * individual interrupt handler in MSI-X multi-message interrupt mode.
13940 */
13941 if (phba->intr_type == MSIX) {
13942 /* Check device state for handling interrupt */
13943 if (lpfc_intr_state_check(phba))
13944 return IRQ_NONE;
13945 /* Need to read HA REG for FCP ring and other ring events */
13946 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13947 return IRQ_HANDLED;
13948 /* Clear up only attention source related to fast-path */
13949 spin_lock_irqsave(&phba->hbalock, iflag);
13950 /*
13951 * If there is deferred error attention, do not check for
13952 * any interrupt.
13953 */
13954 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13955 spin_unlock_irqrestore(&phba->hbalock, iflag);
13956 return IRQ_NONE;
13957 }
13958 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13959 phba->HAregaddr);
13960 readl(phba->HAregaddr); /* flush */
13961 spin_unlock_irqrestore(&phba->hbalock, iflag);
13962 } else
13963 ha_copy = phba->ha_copy;
13964
13965 /*
13966 * Process all events on FCP ring. Take the optimized path for FCP IO.
13967 */
13968 ha_copy &= ~(phba->work_ha_mask);
13969
13970 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13971 status >>= (4*LPFC_FCP_RING);
13972 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13973 if (status & HA_RXMASK)
13974 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13975
13976 if (phba->cfg_multi_ring_support == 2) {
13977 /*
13978 * Process all events on extra ring. Take the optimized path
13979 * for extra ring IO.
13980 */
13981 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13982 status >>= (4*LPFC_EXTRA_RING);
13983 if (status & HA_RXMASK) {
13984 lpfc_sli_handle_fast_ring_event(phba,
13985 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13986 status);
13987 }
13988 }
13989 return IRQ_HANDLED;
13990 } /* lpfc_sli_fp_intr_handler */
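/*
 * How these handlers are wired up (sketch; the vector variables and
 * name strings here are placeholders, the actual registration lives
 * in the driver init path): in MSI-X mode the slow-path and fast-path
 * handlers get their own vectors, while in MSI/INTx mode only the
 * device-level lpfc_sli_intr_handler below is registered.
 *
 *	rc = request_irq(sp_vector, lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc-sp", phba);
 *	rc = request_irq(fp_vector, lpfc_sli_fp_intr_handler, 0,
 *			 "lpfc-fp", phba);
 */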
13991
13992 /**
13993 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13994 * @irq: Interrupt number.
13995 * @dev_id: The device context pointer.
13996 *
13997 * This function is the HBA device-level interrupt handler to device with
13998 * SLI-3 interface spec, called from the PCI layer when either MSI or
13999 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14000 * requires driver attention. This function invokes the slow-path interrupt
14001 * attention handling function and fast-path interrupt attention handling
14002 * function in turn to process the relevant HBA attention events. This
14003 * function is called without any lock held. It gets the hbalock to access
14004 * and update SLI data structures.
14005 *
14006 * This function returns IRQ_HANDLED when interrupt is handled, else it
14007 * returns IRQ_NONE.
14008 **/
14009 irqreturn_t
14010 lpfc_sli_intr_handler(int irq, void *dev_id)
14011 {
14012 struct lpfc_hba *phba;
14013 irqreturn_t sp_irq_rc, fp_irq_rc;
14014 unsigned long status1, status2;
14015 uint32_t hc_copy;
14016
14017 /*
14018 * Get the driver's phba structure from the dev_id and
14019 * assume the HBA is not interrupting.
14020 */
14021 phba = (struct lpfc_hba *) dev_id;
14022
14023 if (unlikely(!phba))
14024 return IRQ_NONE;
14025
14026 /* Check device state for handling interrupt */
14027 if (lpfc_intr_state_check(phba))
14028 return IRQ_NONE;
14029
14030 spin_lock(&phba->hbalock);
14031 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14032 spin_unlock(&phba->hbalock);
14033 return IRQ_HANDLED;
14034 }
14035
14036 if (unlikely(!phba->ha_copy)) {
14037 spin_unlock(&phba->hbalock);
14038 return IRQ_NONE;
14039 } else if (phba->ha_copy & HA_ERATT) {
14040 if (phba->hba_flag & HBA_ERATT_HANDLED)
14041 /* ERATT polling has handled ERATT */
14042 phba->ha_copy &= ~HA_ERATT;
14043 else
14044 /* Indicate interrupt handler handles ERATT */
14045 phba->hba_flag |= HBA_ERATT_HANDLED;
14046 }
14047
14048 /*
14049 * If there is deferred error attention, do not check for any interrupt.
14050 */
14051 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14052 spin_unlock(&phba->hbalock);
14053 return IRQ_NONE;
14054 }
14055
14056 /* Clear attention sources except link and error attentions */
14057 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14058 spin_unlock(&phba->hbalock);
14059 return IRQ_HANDLED;
14060 }
14061 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14062 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14063 phba->HCregaddr);
14064 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14065 writel(hc_copy, phba->HCregaddr);
14066 readl(phba->HAregaddr); /* flush */
14067 spin_unlock(&phba->hbalock);
14068
14069 /*
14070 * Invokes slow-path host attention interrupt handling as appropriate.
14071 */
14072
14073 /* status of events with mailbox and link attention */
14074 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14075
14076 /* status of events with ELS ring */
14077 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
14078 status2 >>= (4*LPFC_ELS_RING);
14079
14080 if (status1 || (status2 & HA_RXMASK))
14081 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14082 else
14083 sp_irq_rc = IRQ_NONE;
14084
14085 /*
14086 * Invoke fast-path host attention interrupt handling as appropriate.
14087 */
14088
14089 /* status of events with FCP ring */
14090 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14091 status1 >>= (4*LPFC_FCP_RING);
14092
14093 /* status of events with extra ring */
14094 if (phba->cfg_multi_ring_support == 2) {
14095 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14096 status2 >>= (4*LPFC_EXTRA_RING);
14097 } else
14098 status2 = 0;
14099
14100 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14101 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14102 else
14103 fp_irq_rc = IRQ_NONE;
14104
14105 /* Return device-level interrupt handling status */
14106 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14107 } /* lpfc_sli_intr_handler */
14108
14109 /**
14110 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14111 * @phba: pointer to lpfc hba data structure.
14112 *
14113 * This routine is invoked by the worker thread to process all the pending
14114 * SLI4 els abort xri events.
14115 **/
14116 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14117 {
14118 struct lpfc_cq_event *cq_event;
14119 unsigned long iflags;
14120
14121 /* First, declare that the els xri abort event has been handled */
14122 spin_lock_irqsave(&phba->hbalock, iflags);
14123 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14124 spin_unlock_irqrestore(&phba->hbalock, iflags);
14125
14126 /* Now, handle all the els xri abort events */
14127 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14128 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14129 /* Get the first event from the head of the event queue */
14130 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14131 cq_event, struct lpfc_cq_event, list);
14132 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14133 iflags);
14134 /* Notify aborted XRI for ELS work queue */
14135 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14136
14137 /* Free the event processed back to the free pool */
14138 lpfc_sli4_cq_event_release(phba, cq_event);
14139 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14140 iflags);
14141 }
14142 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14143 }
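/*
 * The loop above is the standard drop-the-lock-to-process pattern:
 * the list lock covers only the dequeue and is released around the
 * per-event work. Generic sketch (item_type and process() are
 * placeholders for illustration):
 *
 *	spin_lock_irqsave(&lock, flags);
 *	while (!list_empty(&queue)) {
 *		item = list_first_entry(&queue, struct item_type, list);
 *		list_del_init(&item->list);
 *		spin_unlock_irqrestore(&lock, flags);
 *		process(item);			(lock not held here)
 *		spin_lock_irqsave(&lock, flags);
 *	}
 *	spin_unlock_irqrestore(&lock, flags);
 */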
14144
14145 /**
14146 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14147 * @phba: Pointer to HBA context object.
14148 * @irspiocbq: Pointer to work-queue completion queue entry.
14149 *
14150 * This routine handles an ELS work-queue completion event and constructs
14151 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14152 * discovery engine to handle.
14153 *
14154 * Return: Pointer to the receive IOCBQ, NULL otherwise.
14155 **/
14156 static struct lpfc_iocbq *
14157 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14158 struct lpfc_iocbq *irspiocbq)
14159 {
14160 struct lpfc_sli_ring *pring;
14161 struct lpfc_iocbq *cmdiocbq;
14162 struct lpfc_wcqe_complete *wcqe;
14163 unsigned long iflags;
14164
14165 pring = lpfc_phba_elsring(phba);
14166 if (unlikely(!pring))
14167 return NULL;
14168
14169 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14170 spin_lock_irqsave(&pring->ring_lock, iflags);
14171 pring->stats.iocb_event++;
14172 /* Look up the ELS command IOCB and create pseudo response IOCB */
14173 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14174 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14175 if (unlikely(!cmdiocbq)) {
14176 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14177 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14178 "0386 ELS complete with no corresponding "
14179 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14180 wcqe->word0, wcqe->total_data_placed,
14181 wcqe->parameter, wcqe->word3);
14182 lpfc_sli_release_iocbq(phba, irspiocbq);
14183 return NULL;
14184 }
14185
14186 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14187 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14188
14189 /* Put the iocb back on the txcmplq */
14190 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14191 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14192
14193 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14194 spin_lock_irqsave(&phba->hbalock, iflags);
14195 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14196 spin_unlock_irqrestore(&phba->hbalock, iflags);
14197 }
14198
14199 return irspiocbq;
14200 }
14201
14202 inline struct lpfc_cq_event *
14203 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14204 {
14205 struct lpfc_cq_event *cq_event;
14206
14207 /* Allocate a new internal CQ_EVENT entry */
14208 cq_event = lpfc_sli4_cq_event_alloc(phba);
14209 if (!cq_event) {
14210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14211 "0602 Failed to alloc CQ_EVENT entry\n");
14212 return NULL;
14213 }
14214
14215 /* Move the CQE into the event */
14216 memcpy(&cq_event->cqe, entry, size);
14217 return cq_event;
14218 }
14219
14220 /**
14221 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14222 * @phba: Pointer to HBA context object.
14223 * @mcqe: Pointer to mailbox completion queue entry.
14224 *
14225 * This routine processes a mailbox completion queue entry carrying an
14226 * asynchronous event.
14227 *
14228 * Return: true if work posted to worker thread, otherwise false.
14229 **/
14230 static bool
14231 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14232 {
14233 struct lpfc_cq_event *cq_event;
14234 unsigned long iflags;
14235
14236 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14237 "0392 Async Event: word0:x%x, word1:x%x, "
14238 "word2:x%x, word3:x%x\n", mcqe->word0,
14239 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14240
14241 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14242 if (!cq_event)
14243 return false;
14244
14245 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14246 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14247 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14248
14249 /* Set the async event flag */
14250 spin_lock_irqsave(&phba->hbalock, iflags);
14251 phba->hba_flag |= ASYNC_EVENT;
14252 spin_unlock_irqrestore(&phba->hbalock, iflags);
14253
14254 return true;
14255 }
14256
14257 /**
14258 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14259 * @phba: Pointer to HBA context object.
14260 * @mcqe: Pointer to mailbox completion queue entry.
14261 *
14262 * This routine processes a mailbox completion queue entry carrying a
14263 * mailbox completion event.
14264 *
14265 * Return: true if work posted to worker thread, otherwise false.
14266 **/
14267 static bool
14268 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14269 {
14270 uint32_t mcqe_status;
14271 MAILBOX_t *mbox, *pmbox;
14272 struct lpfc_mqe *mqe;
14273 struct lpfc_vport *vport;
14274 struct lpfc_nodelist *ndlp;
14275 struct lpfc_dmabuf *mp;
14276 unsigned long iflags;
14277 LPFC_MBOXQ_t *pmb;
14278 bool workposted = false;
14279 int rc;
14280
14281 /* If not a mailbox-complete MCQE, bail out after checking mailbox consume */
14282 if (!bf_get(lpfc_trailer_completed, mcqe))
14283 goto out_no_mqe_complete;
14284
14285 /* Get the reference to the active mbox command */
14286 spin_lock_irqsave(&phba->hbalock, iflags);
14287 pmb = phba->sli.mbox_active;
14288 if (unlikely(!pmb)) {
14289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14290 "1832 No pending MBOX command to handle\n");
14291 spin_unlock_irqrestore(&phba->hbalock, iflags);
14292 goto out_no_mqe_complete;
14293 }
14294 spin_unlock_irqrestore(&phba->hbalock, iflags);
14295 mqe = &pmb->u.mqe;
14296 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14297 mbox = phba->mbox;
14298 vport = pmb->vport;
14299
14300 /* Reset heartbeat timer */
14301 phba->last_completion_time = jiffies;
14302 del_timer(&phba->sli.mbox_tmo);
14303
14304 /* Move mbox data to caller's mailbox region, do endian swapping */
14305 if (pmb->mbox_cmpl && mbox)
14306 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14307
14308 /*
14309 * For mcqe errors, conditionally move a modified error code to
14310 * the mbox so that the error will not be missed.
14311 */
14312 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14313 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14314 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14315 bf_set(lpfc_mqe_status, mqe,
14316 (LPFC_MBX_ERROR_RANGE | mcqe_status));
14317 }
14318 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14319 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14320 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14321 "MBOX dflt rpi: status:x%x rpi:x%x",
14322 mcqe_status,
14323 pmbox->un.varWords[0], 0);
14324 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14325 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14326 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14327
14328 /* Reg_LOGIN of dflt RPI was successful. Mark the
14329 * node as having an UNREG_LOGIN in progress to stop
14330 * an unsolicited PLOGI from the same NPortId from
14331 * starting another mailbox transaction.
14332 */
14333 spin_lock_irqsave(&ndlp->lock, iflags);
14334 ndlp->nlp_flag |= NLP_UNREG_INP;
14335 spin_unlock_irqrestore(&ndlp->lock, iflags);
14336 lpfc_unreg_login(phba, vport->vpi,
14337 pmbox->un.varWords[0], pmb);
14338 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14339 pmb->ctx_buf = mp;
14340
14341 /* No reference taken here. This is a default
14342 * RPI reg/immediate unreg cycle. The reference was
14343 * taken in the reg rpi path and is released when
14344 * this mailbox completes.
14345 */
14346 pmb->ctx_ndlp = ndlp;
14347 pmb->vport = vport;
14348 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14349 if (rc != MBX_BUSY)
14350 lpfc_printf_log(phba, KERN_ERR,
14351 LOG_TRACE_EVENT,
14352 "0385 rc should "
14353 "have been MBX_BUSY\n");
14354 if (rc != MBX_NOT_FINISHED)
14355 goto send_current_mbox;
14356 }
14357 }
14358 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14359 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14360 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14361
14362 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14363 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14364 spin_lock_irqsave(&phba->hbalock, iflags);
14365 /* Release the mailbox command posting token */
14366 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14367 phba->sli.mbox_active = NULL;
14368 if (bf_get(lpfc_trailer_consumed, mcqe))
14369 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14370 spin_unlock_irqrestore(&phba->hbalock, iflags);
14371
14372 /* Post the next mbox command, if there is one */
14373 lpfc_sli4_post_async_mbox(phba);
14374
14375 /* Process cmpl now */
14376 if (pmb->mbox_cmpl)
14377 pmb->mbox_cmpl(phba, pmb);
14378 return false;
14379 }
14380
14381 /* There is mailbox completion work to queue to the worker thread */
14382 spin_lock_irqsave(&phba->hbalock, iflags);
14383 __lpfc_mbox_cmpl_put(phba, pmb);
14384 phba->work_ha |= HA_MBATT;
14385 spin_unlock_irqrestore(&phba->hbalock, iflags);
14386 workposted = true;
14387
14388 send_current_mbox:
14389 spin_lock_irqsave(&phba->hbalock, iflags);
14390 /* Release the mailbox command posting token */
14391 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14392 /* Setting the active mailbox pointer must be in sync with the flag clear */
14393 phba->sli.mbox_active = NULL;
14394 if (bf_get(lpfc_trailer_consumed, mcqe))
14395 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14396 spin_unlock_irqrestore(&phba->hbalock, iflags);
14397 /* Wake up worker thread to post the next pending mailbox command */
14398 lpfc_worker_wake_up(phba);
14399 return workposted;
14400
14401 out_no_mqe_complete:
14402 spin_lock_irqsave(&phba->hbalock, iflags);
14403 if (bf_get(lpfc_trailer_consumed, mcqe))
14404 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14405 spin_unlock_irqrestore(&phba->hbalock, iflags);
14406 return false;
14407 }
14408
14409 /**
14410 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14411 * @phba: Pointer to HBA context object.
14412 * @cq: Pointer to associated CQ
14413 * @cqe: Pointer to mailbox completion queue entry.
14414 *
14415 * This routine processes a mailbox completion queue entry; it invokes the
14416 * proper mailbox complete handling or asynchronous event handling routine
14417 * according to the MCQE's async bit.
14418 *
14419 * Return: true if work posted to worker thread, otherwise false.
14420 **/
14421 static bool
14422 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14423 struct lpfc_cqe *cqe)
14424 {
14425 struct lpfc_mcqe mcqe;
14426 bool workposted;
14427
14428 cq->CQ_mbox++;
14429
14430 /* Copy the mailbox MCQE and convert endian order as needed */
14431 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14432
14433 /* Invoke the proper event handling routine */
14434 if (!bf_get(lpfc_trailer_async, &mcqe))
14435 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14436 else
14437 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14438 return workposted;
14439 }
14440
14441 /**
14442 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14443 * @phba: Pointer to HBA context object.
14444 * @cq: Pointer to associated CQ
14445 * @wcqe: Pointer to work-queue completion queue entry.
14446 *
14447 * This routine handles an ELS work-queue completion event.
14448 *
14449 * Return: true if work posted to worker thread, otherwise false.
14450 **/
14451 static bool
14452 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14453 struct lpfc_wcqe_complete *wcqe)
14454 {
14455 struct lpfc_iocbq *irspiocbq;
14456 unsigned long iflags;
14457 struct lpfc_sli_ring *pring = cq->pring;
14458 int txq_cnt = 0;
14459 int txcmplq_cnt = 0;
14460
14461 /* Check for response status */
14462 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14463 /* Log the error status */
14464 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14465 "0357 ELS CQE error: status=x%x: "
14466 "CQE: %08x %08x %08x %08x\n",
14467 bf_get(lpfc_wcqe_c_status, wcqe),
14468 wcqe->word0, wcqe->total_data_placed,
14469 wcqe->parameter, wcqe->word3);
14470 }
14471
14472 /* Get an irspiocbq for later ELS response processing use */
14473 irspiocbq = lpfc_sli_get_iocbq(phba);
14474 if (!irspiocbq) {
14475 if (!list_empty(&pring->txq))
14476 txq_cnt++;
14477 if (!list_empty(&pring->txcmplq))
14478 txcmplq_cnt++;
14479 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14480 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14481 "els_txcmplq_cnt=%d\n",
14482 txq_cnt, phba->iocb_cnt,
14483 txcmplq_cnt);
14484 return false;
14485 }
14486
14487 /* Save off the slow-path queue event for work thread to process */
14488 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14489 spin_lock_irqsave(&phba->hbalock, iflags);
14490 list_add_tail(&irspiocbq->cq_event.list,
14491 &phba->sli4_hba.sp_queue_event);
14492 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14493 spin_unlock_irqrestore(&phba->hbalock, iflags);
14494
14495 return true;
14496 }
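/*
 * Consumer side (rough sketch of the worker-thread path in
 * lpfc_work_done(); simplified, not the literal code): the event
 * queued above is drained once the worker sees the flag.
 *
 *	if (phba->hba_flag & HBA_SP_QUEUE_EVT)
 *		lpfc_sli_handle_slow_ring_event(phba, pring,
 *						status & HA_RXMASK);
 */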
14497
14498 /**
14499 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14500 * @phba: Pointer to HBA context object.
14501 * @wcqe: Pointer to work-queue completion queue entry.
14502 *
14503 * This routine handles a slow-path WQ entry consumed event by invoking the
14504 * proper WQ release routine on the slow-path WQ.
14505 **/
14506 static void
14507 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14508 struct lpfc_wcqe_release *wcqe)
14509 {
14510 /* sanity check on queue memory */
14511 if (unlikely(!phba->sli4_hba.els_wq))
14512 return;
14513 /* Check for the slow-path ELS work queue */
14514 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14515 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14516 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14517 else
14518 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14519 "2579 Slow-path wqe consume event carries "
14520 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14521 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14522 phba->sli4_hba.els_wq->queue_id);
14523 }
14524
14525 /**
14526 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
14527 * @phba: Pointer to HBA context object.
14528 * @cq: Pointer to a WQ completion queue.
14529 * @wcqe: Pointer to work-queue completion queue entry.
14530 *
14531 * This routine handles an XRI abort event.
14532 *
14533 * Return: true if work posted to worker thread, otherwise false.
14534 **/
14535 static bool
14536 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14537 struct lpfc_queue *cq,
14538 struct sli4_wcqe_xri_aborted *wcqe)
14539 {
14540 bool workposted = false;
14541 struct lpfc_cq_event *cq_event;
14542 unsigned long iflags;
14543
14544 switch (cq->subtype) {
14545 case LPFC_IO:
14546 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14547 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14548 /* Notify aborted XRI for NVME work queue */
14549 if (phba->nvmet_support)
14550 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14551 }
14552 workposted = false;
14553 break;
14554 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14555 case LPFC_ELS:
14556 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14557 if (!cq_event) {
14558 workposted = false;
14559 break;
14560 }
14561 cq_event->hdwq = cq->hdwq;
14562 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14563 iflags);
14564 list_add_tail(&cq_event->list,
14565 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14566 /* Set the els xri abort event flag */
14567 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14568 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14569 iflags);
14570 workposted = true;
14571 break;
14572 default:
14573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14574 "0603 Invalid CQ subtype %d: "
14575 "%08x %08x %08x %08x\n",
14576 cq->subtype, wcqe->word0, wcqe->parameter,
14577 wcqe->word2, wcqe->word3);
14578 workposted = false;
14579 break;
14580 }
14581 return workposted;
14582 }
14583
14584 #define FC_RCTL_MDS_DIAGS 0xF4
14585
14586 /**
14587 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14588 * @phba: Pointer to HBA context object.
14589 * @rcqe: Pointer to receive-queue completion queue entry.
14590 *
14591 * This routine processes a receive-queue completion queue entry.
14592 *
14593 * Return: true if work posted to worker thread, otherwise false.
14594 **/
14595 static bool
14596 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14597 {
14598 bool workposted = false;
14599 struct fc_frame_header *fc_hdr;
14600 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14601 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14602 struct lpfc_nvmet_tgtport *tgtp;
14603 struct hbq_dmabuf *dma_buf;
14604 uint32_t status, rq_id;
14605 unsigned long iflags;
14606
14607 /* sanity check on queue memory */
14608 if (unlikely(!hrq) || unlikely(!drq))
14609 return workposted;
14610
14611 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14612 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14613 else
14614 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14615 if (rq_id != hrq->queue_id)
14616 goto out;
14617
14618 status = bf_get(lpfc_rcqe_status, rcqe);
14619 switch (status) {
14620 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14621 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14622 "2537 Receive Frame Truncated!!\n");
14623 fallthrough;
14624 case FC_STATUS_RQ_SUCCESS:
14625 spin_lock_irqsave(&phba->hbalock, iflags);
14626 lpfc_sli4_rq_release(hrq, drq);
14627 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14628 if (!dma_buf) {
14629 hrq->RQ_no_buf_found++;
14630 spin_unlock_irqrestore(&phba->hbalock, iflags);
14631 goto out;
14632 }
14633 hrq->RQ_rcv_buf++;
14634 hrq->RQ_buf_posted--;
14635 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14636
14637 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14638
14639 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14640 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14641 spin_unlock_irqrestore(&phba->hbalock, iflags);
14642 /* Handle MDS Loopback frames */
14643 if (!(phba->pport->load_flag & FC_UNLOADING))
14644 lpfc_sli4_handle_mds_loopback(phba->pport,
14645 dma_buf);
14646 else
14647 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14648 break;
14649 }
14650
14651 /* save off the frame for the work thread to process */
14652 list_add_tail(&dma_buf->cq_event.list,
14653 &phba->sli4_hba.sp_queue_event);
14654 /* Frame received */
14655 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14656 spin_unlock_irqrestore(&phba->hbalock, iflags);
14657 workposted = true;
14658 break;
14659 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14660 if (phba->nvmet_support) {
14661 tgtp = phba->targetport->private;
14662 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14663 "6402 RQE Error x%x, posted %d err_cnt "
14664 "%d: %x %x %x\n",
14665 status, hrq->RQ_buf_posted,
14666 hrq->RQ_no_posted_buf,
14667 atomic_read(&tgtp->rcv_fcp_cmd_in),
14668 atomic_read(&tgtp->rcv_fcp_cmd_out),
14669 atomic_read(&tgtp->xmt_fcp_release));
14670 }
14671 fallthrough;
14672
14673 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14674 hrq->RQ_no_posted_buf++;
14675 /* Post more buffers if possible */
14676 spin_lock_irqsave(&phba->hbalock, iflags);
14677 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14678 spin_unlock_irqrestore(&phba->hbalock, iflags);
14679 workposted = true;
14680 break;
14681 }
14682 out:
14683 return workposted;
14684 }
14685
14686 /**
14687 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14688 * @phba: Pointer to HBA context object.
14689 * @cq: Pointer to the completion queue.
14690 * @cqe: Pointer to a completion queue entry.
14691 *
14692 * This routine processes a slow-path work-queue or receive-queue
14693 * completion queue entry.
14694 *
14695 * Return: true if work posted to worker thread, otherwise false.
14696 **/
14697 static bool
14698 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14699 struct lpfc_cqe *cqe)
14700 {
14701 struct lpfc_cqe cqevt;
14702 bool workposted = false;
14703
14704 /* Copy the work queue CQE and convert endian order if needed */
14705 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14706
14707 /* Check for the different types of WCQE and dispatch accordingly */
14708 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14709 case CQE_CODE_COMPL_WQE:
14710 /* Process the WQ/RQ complete event */
14711 phba->last_completion_time = jiffies;
14712 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14713 (struct lpfc_wcqe_complete *)&cqevt);
14714 break;
14715 case CQE_CODE_RELEASE_WQE:
14716 /* Process the WQ release event */
14717 lpfc_sli4_sp_handle_rel_wcqe(phba,
14718 (struct lpfc_wcqe_release *)&cqevt);
14719 break;
14720 case CQE_CODE_XRI_ABORTED:
14721 /* Process the WQ XRI abort event */
14722 phba->last_completion_time = jiffies;
14723 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14724 (struct sli4_wcqe_xri_aborted *)&cqevt);
14725 break;
14726 case CQE_CODE_RECEIVE:
14727 case CQE_CODE_RECEIVE_V1:
14728 /* Process the RQ event */
14729 phba->last_completion_time = jiffies;
14730 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14731 (struct lpfc_rcqe *)&cqevt);
14732 break;
14733 default:
14734 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14735 "0388 Not a valid WCQE code: x%x\n",
14736 bf_get(lpfc_cqe_code, &cqevt));
14737 break;
14738 }
14739 return workposted;
14740 }
14741
14742 /**
14743 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14744 * @phba: Pointer to HBA context object.
14745 * @eqe: Pointer to fast-path event queue entry.
14746 * @speq: Pointer to slow-path event queue.
14747 *
14748 * This routine processes an event queue entry from the slow-path event queue.
14749 * It checks the MajorCode and MinorCode to determine whether this is a
14750 * completion event on a completion queue; if not, an error is logged and
14751 * the routine returns. Otherwise, it gets the corresponding completion
14752 * queue, processes all the entries on that completion queue, rearms the
14753 * completion queue, and then returns.
14754 *
14755 **/
14756 static void
14757 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14758 struct lpfc_queue *speq)
14759 {
14760 struct lpfc_queue *cq = NULL, *childq;
14761 uint16_t cqid;
14762 int ret = 0;
14763
14764 /* Get the reference to the corresponding CQ */
14765 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14766
14767 list_for_each_entry(childq, &speq->child_list, list) {
14768 if (childq->queue_id == cqid) {
14769 cq = childq;
14770 break;
14771 }
14772 }
14773 if (unlikely(!cq)) {
14774 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14775 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14776 "0365 Slow-path CQ identifier "
14777 "(%d) does not exist\n", cqid);
14778 return;
14779 }
14780
14781 /* Save EQ associated with this CQ */
14782 cq->assoc_qp = speq;
14783
14784 if (is_kdump_kernel())
14785 ret = queue_work(phba->wq, &cq->spwork);
14786 else
14787 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14788
14789 if (!ret)
14790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14791 "0390 Cannot schedule queue work "
14792 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14793 cqid, cq->queue_id, raw_smp_processor_id());
14794 }
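
/*
 * Editor's note: a minimal, hedged sketch (not driver code) of the per-CPU
 * work-scheduling pattern used above. All my_* names are hypothetical;
 * queue_work(), queue_work_on() and is_kdump_kernel() are the real kernel
 * APIs the driver calls. Compiled out with #if 0 on purpose.
 */
#if 0
#include <linux/workqueue.h>
#include <linux/crash_dump.h>

struct my_queue {
	struct work_struct work;
	int chann;			/* CPU this queue's work is homed on */
};

static void my_handle_cq(struct work_struct *work)
{
	struct my_queue *q = container_of(work, struct my_queue, work);
	/* drain completion entries for q here */
}

static bool my_sched_cq(struct workqueue_struct *wq, struct my_queue *q)
{
	/* A kdump kernel runs on a single CPU, so skip the CPU binding. */
	if (is_kdump_kernel())
		return queue_work(wq, &q->work);
	return queue_work_on(q->chann, wq, &q->work);
}
#endif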
14795
14796 /**
14797 * __lpfc_sli4_process_cq - Process elements of a CQ
14798 * @phba: Pointer to HBA context object.
14799 * @cq: Pointer to CQ to be processed
14800 * @handler: Routine to process each cqe
14801 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14802 * @poll_mode: Polling mode we were called from
14803 *
14804 * This routine processes completion queue entries in a CQ. While a valid
14805 * queue element is found, the handler is called. During processing checks
14806 * are made for periodic doorbell writes to let the hardware know of
14807 * element consumption.
14808 *
14809 * If the max limit on cqes to process is hit, or there are no more valid
14810 * entries, the loop stops. If we processed a sufficient number of elements,
14811 * meaning there is sufficient load, rather than rearming and generating
14812 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14813 * indicates no rescheduling.
14814 *
14815  * Return: true if work was scheduled, otherwise false.
14816 **/
14817 static bool
14818 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14819 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14820 struct lpfc_cqe *), unsigned long *delay,
14821 enum lpfc_poll_mode poll_mode)
14822 {
14823 struct lpfc_cqe *cqe;
14824 bool workposted = false;
14825 int count = 0, consumed = 0;
14826 bool arm = true;
14827
14828 /* default - no reschedule */
14829 *delay = 0;
14830
14831 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14832 goto rearm_and_exit;
14833
14834 /* Process all the entries to the CQ */
14835 cq->q_flag = 0;
14836 cqe = lpfc_sli4_cq_get(cq);
14837 while (cqe) {
14838 workposted |= handler(phba, cq, cqe);
14839 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14840
14841 consumed++;
14842 if (!(++count % cq->max_proc_limit))
14843 break;
14844
14845 if (!(count % cq->notify_interval)) {
14846 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14847 LPFC_QUEUE_NOARM);
14848 consumed = 0;
14849 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14850 }
14851
14852 if (count == LPFC_NVMET_CQ_NOTIFY)
14853 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14854
14855 cqe = lpfc_sli4_cq_get(cq);
14856 }
14857 if (count >= phba->cfg_cq_poll_threshold) {
14858 *delay = 1;
14859 arm = false;
14860 }
14861
14862 /* Note: complete the irq_poll softirq before rearming CQ */
14863 if (poll_mode == LPFC_IRQ_POLL)
14864 irq_poll_complete(&cq->iop);
14865
14866 /* Track the max number of CQEs processed in 1 EQ */
14867 if (count > cq->CQ_max_cqe)
14868 cq->CQ_max_cqe = count;
14869
14870 cq->assoc_qp->EQ_cqe_cnt += count;
14871
14872 /* Catch the no cq entry condition */
14873 if (unlikely(count == 0))
14874 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14875 "0369 No entry from completion queue "
14876 "qid=%d\n", cq->queue_id);
14877
14878 xchg(&cq->queue_claimed, 0);
14879
14880 rearm_and_exit:
14881 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14882 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14883
14884 return workposted;
14885 }
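
/*
 * Editor's note: a hedged sketch (not driver code) of the batched-doorbell
 * loop above: consume entries, acknowledge them to the hardware only every
 * notify_interval entries, and bound the work done per invocation by
 * max_proc_limit. my_get_entry(), my_handle() and my_ring_db() are
 * hypothetical stand-ins for the driver's CQ accessors.
 */
#if 0
enum my_arm { MY_NOARM, MY_REARM };

struct my_cq { int max_proc_limit; int notify_interval; };

void *my_get_entry(struct my_cq *cq);
void my_handle(struct my_cq *cq, void *e);
void my_ring_db(struct my_cq *cq, int consumed, enum my_arm arm);

static int my_drain_cq(struct my_cq *cq)
{
	int count = 0, consumed = 0;
	void *e;

	while ((e = my_get_entry(cq)) != NULL) {
		my_handle(cq, e);
		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;		/* cap the work per invocation */
		if (!(count % cq->notify_interval)) {
			/* batch-acknowledge without rearming interrupts */
			my_ring_db(cq, consumed, MY_NOARM);
			consumed = 0;
		}
	}
	/* final ack; rearm only if we stopped because the CQ went empty */
	my_ring_db(cq, consumed, e ? MY_NOARM : MY_REARM);
	return count;
}
#endif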
14886
14887 /**
14888  * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
14889 * @cq: pointer to CQ to process
14890 *
14891 * This routine calls the cq processing routine with a handler specific
14892 * to the type of queue bound to it.
14893 *
14894 * The CQ routine returns two values: the first is the calling status,
14895 * which indicates whether work was queued to the background discovery
14896  * thread. If true, the routine should wake up the discovery thread;
14897  * the second is the delay parameter. If non-zero, rather than rearming
14898  * the CQ and generating yet another interrupt, the CQ handler is queued so
14899 * that it is processed in a subsequent polling action. The value of
14900 * the delay indicates when to reschedule it.
14901 **/
14902 static void
14903 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14904 {
14905 struct lpfc_hba *phba = cq->phba;
14906 unsigned long delay;
14907 bool workposted = false;
14908 int ret = 0;
14909
14910 /* Process and rearm the CQ */
14911 switch (cq->type) {
14912 case LPFC_MCQ:
14913 workposted |= __lpfc_sli4_process_cq(phba, cq,
14914 lpfc_sli4_sp_handle_mcqe,
14915 &delay, LPFC_QUEUE_WORK);
14916 break;
14917 case LPFC_WCQ:
14918 if (cq->subtype == LPFC_IO)
14919 workposted |= __lpfc_sli4_process_cq(phba, cq,
14920 lpfc_sli4_fp_handle_cqe,
14921 &delay, LPFC_QUEUE_WORK);
14922 else
14923 workposted |= __lpfc_sli4_process_cq(phba, cq,
14924 lpfc_sli4_sp_handle_cqe,
14925 &delay, LPFC_QUEUE_WORK);
14926 break;
14927 default:
14928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14929 "0370 Invalid completion queue type (%d)\n",
14930 cq->type);
14931 return;
14932 }
14933
14934 if (delay) {
14935 if (is_kdump_kernel())
14936 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14937 delay);
14938 else
14939 ret = queue_delayed_work_on(cq->chann, phba->wq,
14940 &cq->sched_spwork, delay);
14941 if (!ret)
14942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14943 "0394 Cannot schedule queue work "
14944 "for cqid=%d on CPU %d\n",
14945 cq->queue_id, cq->chann);
14946 }
14947
14948 	/* wake up worker thread if there is work to be done */
14949 if (workposted)
14950 lpfc_worker_wake_up(phba);
14951 }
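
/*
 * Editor's note: a hedged sketch (not driver code) of the "reschedule
 * instead of rearm" pattern above. Under load the CQ is left unarmed and
 * its handler is re-queued after a short delay, so further completions are
 * picked up by a timer-driven poll instead of a fresh interrupt. my_cq and
 * my_wq are hypothetical; queue_delayed_work_on() is the real kernel API.
 */
#if 0
#include <linux/workqueue.h>

struct my_cq {
	struct delayed_work dwork;
	int chann;
};

static bool my_resched_cq(struct workqueue_struct *my_wq, struct my_cq *cq,
			  unsigned long delay_jiffies)
{
	/* The driver above uses a 1-jiffy delay once its poll threshold
	 * is crossed; 0 would mean "run as soon as possible".
	 */
	return queue_delayed_work_on(cq->chann, my_wq, &cq->dwork,
				     delay_jiffies);
}
#endif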
14952
14953 /**
14954 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14955 * interrupt
14956 * @work: pointer to work element
14957 *
14958  * Translates from the work element and calls the slow-path handler.
14959 **/
14960 static void
14961 lpfc_sli4_sp_process_cq(struct work_struct *work)
14962 {
14963 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14964
14965 __lpfc_sli4_sp_process_cq(cq);
14966 }
14967
14968 /**
14969 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14970 * @work: pointer to work element
14971 *
14972  * Translates from the work element and calls the slow-path handler.
14973 **/
14974 static void
14975 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14976 {
14977 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14978 struct lpfc_queue, sched_spwork);
14979
14980 __lpfc_sli4_sp_process_cq(cq);
14981 }
14982
14983 /**
14984 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14985 * @phba: Pointer to HBA context object.
14986 * @cq: Pointer to associated CQ
14987 * @wcqe: Pointer to work-queue completion queue entry.
14988 *
14989  * This routine processes a fast-path work-queue completion entry from the
14990  * fast-path event queue for FCP command response completion.
14991 **/
14992 static void
14993 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14994 struct lpfc_wcqe_complete *wcqe)
14995 {
14996 struct lpfc_sli_ring *pring = cq->pring;
14997 struct lpfc_iocbq *cmdiocbq;
14998 unsigned long iflags;
14999
15000 /* Check for response status */
15001 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15002 /* If resource errors reported from HBA, reduce queue
15003 * depth of the SCSI device.
15004 */
15005 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15006 IOSTAT_LOCAL_REJECT)) &&
15007 ((wcqe->parameter & IOERR_PARAM_MASK) ==
15008 IOERR_NO_RESOURCES))
15009 phba->lpfc_rampdown_queue_depth(phba);
15010
15011 /* Log the cmpl status */
15012 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15013 "0373 FCP CQE cmpl: status=x%x: "
15014 "CQE: %08x %08x %08x %08x\n",
15015 bf_get(lpfc_wcqe_c_status, wcqe),
15016 wcqe->word0, wcqe->total_data_placed,
15017 wcqe->parameter, wcqe->word3);
15018 }
15019
15020 /* Look up the FCP command IOCB and create pseudo response IOCB */
15021 spin_lock_irqsave(&pring->ring_lock, iflags);
15022 pring->stats.iocb_event++;
15023 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15024 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15025 spin_unlock_irqrestore(&pring->ring_lock, iflags);
15026 if (unlikely(!cmdiocbq)) {
15027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15028 "0374 FCP complete with no corresponding "
15029 "cmdiocb: iotag (%d)\n",
15030 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15031 return;
15032 }
15033 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15034 cmdiocbq->isr_timestamp = cq->isr_timestamp;
15035 #endif
15036 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15037 spin_lock_irqsave(&phba->hbalock, iflags);
15038 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15039 spin_unlock_irqrestore(&phba->hbalock, iflags);
15040 }
15041
15042 if (cmdiocbq->cmd_cmpl) {
15043 /* For FCP the flag is cleared in cmd_cmpl */
15044 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15045 cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15046 spin_lock_irqsave(&phba->hbalock, iflags);
15047 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15048 spin_unlock_irqrestore(&phba->hbalock, iflags);
15049 }
15050
15051 /* Pass the cmd_iocb and the wcqe to the upper layer */
15052 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15053 sizeof(struct lpfc_wcqe_complete));
15054 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15055 } else {
15056 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15057 "0375 FCP cmdiocb not callback function "
15058 "iotag: (%d)\n",
15059 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15060 }
15061 }
15062
15063 /**
15064 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15065 * @phba: Pointer to HBA context object.
15066 * @cq: Pointer to completion queue.
15067 * @wcqe: Pointer to work-queue completion queue entry.
15068 *
15069  * This routine handles a fast-path WQ entry consumed event by invoking the
15070  * proper WQ release routine on the matching work queue.
15071 **/
15072 static void
15073 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15074 struct lpfc_wcqe_release *wcqe)
15075 {
15076 struct lpfc_queue *childwq;
15077 bool wqid_matched = false;
15078 uint16_t hba_wqid;
15079
15080 /* Check for fast-path FCP work queue release */
15081 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15082 list_for_each_entry(childwq, &cq->child_list, list) {
15083 if (childwq->queue_id == hba_wqid) {
15084 lpfc_sli4_wq_release(childwq,
15085 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15086 if (childwq->q_flag & HBA_NVMET_WQFULL)
15087 lpfc_nvmet_wqfull_process(phba, childwq);
15088 wqid_matched = true;
15089 break;
15090 }
15091 }
15092 	/* Report a warning log message if no match was found */
15093 	if (!wqid_matched)
15094 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15095 "2580 Fast-path wqe consume event carries "
15096 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15097 }
15098
15099 /**
15100 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15101 * @phba: Pointer to HBA context object.
15102 * @cq: Pointer to completion queue.
15103 * @rcqe: Pointer to receive-queue completion queue entry.
15104 *
15105  * This routine processes a receive-queue completion queue entry.
15106 *
15107 * Return: true if work posted to worker thread, otherwise false.
15108 **/
15109 static bool
15110 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15111 struct lpfc_rcqe *rcqe)
15112 {
15113 bool workposted = false;
15114 struct lpfc_queue *hrq;
15115 struct lpfc_queue *drq;
15116 struct rqb_dmabuf *dma_buf;
15117 struct fc_frame_header *fc_hdr;
15118 struct lpfc_nvmet_tgtport *tgtp;
15119 uint32_t status, rq_id;
15120 unsigned long iflags;
15121 uint32_t fctl, idx;
15122
15123 if ((phba->nvmet_support == 0) ||
15124 (phba->sli4_hba.nvmet_cqset == NULL))
15125 return workposted;
15126
15127 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15128 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15129 drq = phba->sli4_hba.nvmet_mrq_data[idx];
15130
15131 /* sanity check on queue memory */
15132 if (unlikely(!hrq) || unlikely(!drq))
15133 return workposted;
15134
15135 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15136 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15137 else
15138 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15139
15140 if ((phba->nvmet_support == 0) ||
15141 (rq_id != hrq->queue_id))
15142 return workposted;
15143
15144 status = bf_get(lpfc_rcqe_status, rcqe);
15145 switch (status) {
15146 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15147 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15148 "6126 Receive Frame Truncated!!\n");
15149 fallthrough;
15150 case FC_STATUS_RQ_SUCCESS:
15151 spin_lock_irqsave(&phba->hbalock, iflags);
15152 lpfc_sli4_rq_release(hrq, drq);
15153 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15154 if (!dma_buf) {
15155 hrq->RQ_no_buf_found++;
15156 spin_unlock_irqrestore(&phba->hbalock, iflags);
15157 goto out;
15158 }
15159 spin_unlock_irqrestore(&phba->hbalock, iflags);
15160 hrq->RQ_rcv_buf++;
15161 hrq->RQ_buf_posted--;
15162 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15163
15164 /* Just some basic sanity checks on FCP Command frame */
15165 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15166 fc_hdr->fh_f_ctl[1] << 8 |
15167 fc_hdr->fh_f_ctl[2]);
15168 if (((fctl &
15169 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15170 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15171 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15172 goto drop;
15173
15174 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15175 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15176 lpfc_nvmet_unsol_fcp_event(
15177 phba, idx, dma_buf, cq->isr_timestamp,
15178 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15179 return false;
15180 }
15181 drop:
15182 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15183 break;
15184 case FC_STATUS_INSUFF_BUF_FRM_DISC:
15185 if (phba->nvmet_support) {
15186 tgtp = phba->targetport->private;
15187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15188 "6401 RQE Error x%x, posted %d err_cnt "
15189 "%d: %x %x %x\n",
15190 status, hrq->RQ_buf_posted,
15191 hrq->RQ_no_posted_buf,
15192 atomic_read(&tgtp->rcv_fcp_cmd_in),
15193 atomic_read(&tgtp->rcv_fcp_cmd_out),
15194 atomic_read(&tgtp->xmt_fcp_release));
15195 }
15196 fallthrough;
15197
15198 case FC_STATUS_INSUFF_BUF_NEED_BUF:
15199 hrq->RQ_no_posted_buf++;
15200 /* Post more buffers if possible */
15201 break;
15202 }
15203 out:
15204 return workposted;
15205 }
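
/*
 * Editor's note: a small, self-contained sketch (not driver code) of the
 * F_CTL sanity check above: a single-frame FCP command must carry
 * FIRST_SEQ, END_SEQ and SEQ_INIT and have a zero sequence count. Only
 * definitions from <scsi/fc/fc_fs.h> are used.
 */
#if 0
#include <scsi/fc/fc_fs.h>
#include <linux/types.h>

static bool my_fcp_cmd_frame_ok(const struct fc_frame_header *fh)
{
	u32 fctl = fh->fh_f_ctl[0] << 16 |
		   fh->fh_f_ctl[1] << 8 |
		   fh->fh_f_ctl[2];
	const u32 need = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

	/* fh_seq_cnt is big-endian, but zero is zero in any byte order */
	return (fctl & need) == need && fh->fh_seq_cnt == 0;
}
#endif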
15206
15207 /**
15208 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15209 * @phba: adapter with cq
15210 * @cq: Pointer to the completion queue.
15211 * @cqe: Pointer to fast-path completion queue entry.
15212 *
15213  * This routine processes a fast-path work-queue completion entry from the
15214  * fast-path event queue for FCP command response completion.
15215 *
15216 * Return: true if work posted to worker thread, otherwise false.
15217 **/
15218 static bool
15219 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15220 struct lpfc_cqe *cqe)
15221 {
15222 struct lpfc_wcqe_release wcqe;
15223 bool workposted = false;
15224
15225 /* Copy the work queue CQE and convert endian order if needed */
15226 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15227
15228 	/* Check the WCQE type and dispatch accordingly */
15229 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15230 case CQE_CODE_COMPL_WQE:
15231 case CQE_CODE_NVME_ERSP:
15232 cq->CQ_wq++;
15233 /* Process the WQ complete event */
15234 phba->last_completion_time = jiffies;
15235 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15236 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15237 (struct lpfc_wcqe_complete *)&wcqe);
15238 break;
15239 case CQE_CODE_RELEASE_WQE:
15240 cq->CQ_release_wqe++;
15241 /* Process the WQ release event */
15242 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15243 (struct lpfc_wcqe_release *)&wcqe);
15244 break;
15245 case CQE_CODE_XRI_ABORTED:
15246 cq->CQ_xri_aborted++;
15247 /* Process the WQ XRI abort event */
15248 phba->last_completion_time = jiffies;
15249 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15250 (struct sli4_wcqe_xri_aborted *)&wcqe);
15251 break;
15252 case CQE_CODE_RECEIVE_V1:
15253 case CQE_CODE_RECEIVE:
15254 phba->last_completion_time = jiffies;
15255 if (cq->subtype == LPFC_NVMET) {
15256 workposted = lpfc_sli4_nvmet_handle_rcqe(
15257 phba, cq, (struct lpfc_rcqe *)&wcqe);
15258 }
15259 break;
15260 default:
15261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15262 "0144 Not a valid CQE code: x%x\n",
15263 bf_get(lpfc_wcqe_c_code, &wcqe));
15264 break;
15265 }
15266 return workposted;
15267 }
15268
15269 /**
15270 * lpfc_sli4_sched_cq_work - Schedules cq work
15271 * @phba: Pointer to HBA context object.
15272 * @cq: Pointer to CQ
15273 * @cqid: CQ ID
15274 *
15275  * This routine checks the poll mode of the CQ, then either schedules
15276  * irq_poll (softirq) processing or queues work to complete the
15277  * cq work.
15278  *
15279  * The queue_work path is taken in NVMET mode, or if poll_mode is
15280  * LPFC_QUEUE_WORK mode. Otherwise, the softirq path is taken.
15281 *
15282 **/
15283 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15284 struct lpfc_queue *cq, uint16_t cqid)
15285 {
15286 int ret = 0;
15287
15288 switch (cq->poll_mode) {
15289 case LPFC_IRQ_POLL:
15290 		/* CGN mgmt is mutually exclusive with softirq processing */
15291 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15292 irq_poll_sched(&cq->iop);
15293 break;
15294 }
15295 fallthrough;
15296 case LPFC_QUEUE_WORK:
15297 default:
15298 if (is_kdump_kernel())
15299 ret = queue_work(phba->wq, &cq->irqwork);
15300 else
15301 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15302 if (!ret)
15303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15304 "0383 Cannot schedule queue work "
15305 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15306 cqid, cq->queue_id,
15307 raw_smp_processor_id());
15308 }
15309 }
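
/*
 * Editor's note: a hedged sketch (not driver code) of the irq_poll flow
 * chosen above for LPFC_IRQ_POLL: initialize with a weight and callback,
 * schedule from hard-irq context, and call irq_poll_complete() from the
 * callback when done. The my_* names are hypothetical; the irq_poll API is
 * from <linux/irq_poll.h>.
 */
#if 0
#include <linux/irq_poll.h>

struct my_cq {
	struct irq_poll iop;
};

static int my_poll(struct irq_poll *iop, int budget)
{
	struct my_cq *cq = container_of(iop, struct my_cq, iop);

	/* ... process up to 'budget' completions for cq ... */
	irq_poll_complete(iop);		/* done with this scheduling round */
	return 1;			/* completions handled */
}

static void my_init_and_kick(struct my_cq *cq)
{
	irq_poll_init(&cq->iop, 64 /* weight */, my_poll);
	irq_poll_sched(&cq->iop);	/* typically from the hard-irq path */
}
#endif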
15310
15311 /**
15312 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15313 * @phba: Pointer to HBA context object.
15314 * @eq: Pointer to the queue structure.
15315 * @eqe: Pointer to fast-path event queue entry.
15316 *
15317  * This routine processes an event queue entry from the fast-path event queue.
15318  * It checks the MajorCode and MinorCode to determine whether this is a
15319  * completion event on a completion queue; if not, an error is logged and the
15320  * routine returns. Otherwise, it gets to the corresponding completion
15321  * queue and processes all the entries on the completion queue, rearms the
15322  * completion queue, and then returns.
15323 **/
15324 static void
15325 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15326 struct lpfc_eqe *eqe)
15327 {
15328 struct lpfc_queue *cq = NULL;
15329 uint32_t qidx = eq->hdwq;
15330 uint16_t cqid, id;
15331
15332 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15334 "0366 Not a valid completion "
15335 "event: majorcode=x%x, minorcode=x%x\n",
15336 bf_get_le32(lpfc_eqe_major_code, eqe),
15337 bf_get_le32(lpfc_eqe_minor_code, eqe));
15338 return;
15339 }
15340
15341 /* Get the reference to the corresponding CQ */
15342 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15343
15344 /* Use the fast lookup method first */
15345 if (cqid <= phba->sli4_hba.cq_max) {
15346 cq = phba->sli4_hba.cq_lookup[cqid];
15347 if (cq)
15348 goto work_cq;
15349 }
15350
15351 /* Next check for NVMET completion */
15352 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15353 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15354 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15355 /* Process NVMET unsol rcv */
15356 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15357 goto process_cq;
15358 }
15359 }
15360
15361 if (phba->sli4_hba.nvmels_cq &&
15362 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15363 /* Process NVME unsol rcv */
15364 cq = phba->sli4_hba.nvmels_cq;
15365 }
15366
15367 /* Otherwise this is a Slow path event */
15368 if (cq == NULL) {
15369 lpfc_sli4_sp_handle_eqe(phba, eqe,
15370 phba->sli4_hba.hdwq[qidx].hba_eq);
15371 return;
15372 }
15373
15374 process_cq:
15375 if (unlikely(cqid != cq->queue_id)) {
15376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15377 "0368 Miss-matched fast-path completion "
15378 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15379 cqid, cq->queue_id);
15380 return;
15381 }
15382
15383 work_cq:
15384 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15385 if (phba->ktime_on)
15386 cq->isr_timestamp = ktime_get_ns();
15387 else
15388 cq->isr_timestamp = 0;
15389 #endif
15390 lpfc_sli4_sched_cq_work(phba, cq, cqid);
15391 }
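
/*
 * Editor's note: a hedged sketch (not driver code) of the fast CQ lookup
 * above. Instead of walking a child list on every EQE, a flat table indexed
 * by queue id gives an O(1) hot path; a slower search remains only as a
 * fallback for ids outside the table. All names here are hypothetical.
 */
#if 0
#include <linux/types.h>

struct my_cq;

struct my_hba {
	u16 cq_max;
	struct my_cq **cq_lookup;	/* indexed by hardware queue id */
};

struct my_cq *my_find_cq_slow(struct my_hba *h, u16 cqid);

static struct my_cq *my_find_cq(struct my_hba *h, u16 cqid)
{
	if (cqid <= h->cq_max && h->cq_lookup[cqid])
		return h->cq_lookup[cqid];	/* O(1) hot path */
	return my_find_cq_slow(h, cqid);	/* list-walk fallback */
}
#endif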
15392
15393 /**
15394  * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
15395  * @cq: Pointer to CQ to be processed
15396  * @poll_mode: Enum lpfc_poll_mode indicating the polling mode in effect
15397 *
15398 * This routine calls the cq processing routine with the handler for
15399 * fast path CQEs.
15400 *
15401 * The CQ routine returns two values: the first is the calling status,
15402 * which indicates whether work was queued to the background discovery
15403  * thread. If true, the routine should wake up the discovery thread;
15404  * the second is the delay parameter. If non-zero, rather than rearming
15405  * the CQ and generating yet another interrupt, the CQ handler is queued so
15406 * that it is processed in a subsequent polling action. The value of
15407 * the delay indicates when to reschedule it.
15408 **/
15409 static void
15410 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15411 enum lpfc_poll_mode poll_mode)
15412 {
15413 struct lpfc_hba *phba = cq->phba;
15414 unsigned long delay;
15415 bool workposted = false;
15416 int ret = 0;
15417
15418 /* process and rearm the CQ */
15419 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15420 &delay, poll_mode);
15421
15422 if (delay) {
15423 if (is_kdump_kernel())
15424 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15425 delay);
15426 else
15427 ret = queue_delayed_work_on(cq->chann, phba->wq,
15428 &cq->sched_irqwork, delay);
15429 if (!ret)
15430 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15431 "0367 Cannot schedule queue work "
15432 "for cqid=%d on CPU %d\n",
15433 cq->queue_id, cq->chann);
15434 }
15435
15436 	/* wake up worker thread if there is work to be done */
15437 if (workposted)
15438 lpfc_worker_wake_up(phba);
15439 }
15440
15441 /**
15442 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15443 * interrupt
15444 * @work: pointer to work element
15445 *
15446  * Translates from the work element and calls the fast-path handler.
15447 **/
15448 static void
15449 lpfc_sli4_hba_process_cq(struct work_struct *work)
15450 {
15451 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15452
15453 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15454 }
15455
15456 /**
15457 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15458 * @work: pointer to work element
15459 *
15460  * Translates from the work element and calls the fast-path handler.
15461 **/
15462 static void
15463 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15464 {
15465 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15466 struct lpfc_queue, sched_irqwork);
15467
15468 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15469 }
15470
15471 /**
15472 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15473 * @irq: Interrupt number.
15474 * @dev_id: The device context pointer.
15475 *
15476 * This function is directly called from the PCI layer as an interrupt
15477 * service routine when device with SLI-4 interface spec is enabled with
15478 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15479 * ring event in the HBA. However, when the device is enabled with either
15480 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15481 * device-level interrupt handler. When the PCI slot is in error recovery
15482 * or the HBA is undergoing initialization, the interrupt handler will not
15483  * process the interrupt. The SCSI FCP fast-path ring events are handled in
15484  * the interrupt context. This function is called without any lock held.
15485  * It gets the hbalock to access and update SLI data structures. Note that
15486  * the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
15487  * is equal to the FCP CQ index.
15488 *
15489 * The link attention and ELS ring attention events are handled
15490 * by the worker thread. The interrupt handler signals the worker thread
15491 * and returns for these events. This function is called without any lock
15492 * held. It gets the hbalock to access and update SLI data structures.
15493 *
15494 * This function returns IRQ_HANDLED when interrupt is handled else it
15495 * returns IRQ_NONE.
15496 **/
15497 irqreturn_t
15498 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15499 {
15500 struct lpfc_hba *phba;
15501 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15502 struct lpfc_queue *fpeq;
15503 unsigned long iflag;
15504 int ecount = 0;
15505 int hba_eqidx;
15506 struct lpfc_eq_intr_info *eqi;
15507
15508 /* Get the driver's phba structure from the dev_id */
15509 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15510 phba = hba_eq_hdl->phba;
15511 hba_eqidx = hba_eq_hdl->idx;
15512
15513 if (unlikely(!phba))
15514 return IRQ_NONE;
15515 if (unlikely(!phba->sli4_hba.hdwq))
15516 return IRQ_NONE;
15517
15518 /* Get to the EQ struct associated with this vector */
15519 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15520 if (unlikely(!fpeq))
15521 return IRQ_NONE;
15522
15523 /* Check device state for handling interrupt */
15524 if (unlikely(lpfc_intr_state_check(phba))) {
15525 /* Check again for link_state with lock held */
15526 spin_lock_irqsave(&phba->hbalock, iflag);
15527 if (phba->link_state < LPFC_LINK_DOWN)
15528 /* Flush, clear interrupt, and rearm the EQ */
15529 lpfc_sli4_eqcq_flush(phba, fpeq);
15530 spin_unlock_irqrestore(&phba->hbalock, iflag);
15531 return IRQ_NONE;
15532 }
15533
15534 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15535 eqi->icnt++;
15536
15537 fpeq->last_cpu = raw_smp_processor_id();
15538
15539 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15540 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15541 phba->cfg_auto_imax &&
15542 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15543 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15544 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15545
15546 /* process and rearm the EQ */
15547 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15548
15549 if (unlikely(ecount == 0)) {
15550 fpeq->EQ_no_entry++;
15551 if (phba->intr_type == MSIX)
15552 			/* MSI-X vector is not shared, so just warn about the empty EQ */
15553 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15554 "0358 MSI-X interrupt with no EQE\n");
15555 else
15556 			/* A non-MSI-X interrupt line may be shared; claim nothing */
15557 return IRQ_NONE;
15558 }
15559
15560 return IRQ_HANDLED;
15561 } /* lpfc_sli4_hba_intr_handler */
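
/*
 * Editor's note: a hedged sketch (not driver code) of how a per-vector
 * handler like the one above is typically wired up: each MSI-X vector is
 * registered via request_irq() with a per-EQ cookie as dev_id, so the
 * handler can recover its EQ without a global lookup. Everything except
 * request_irq()/free_irq() is hypothetical.
 */
#if 0
#include <linux/interrupt.h>

#define MY_MAX_VEC 16

struct my_hba;

struct my_eq_hdl {
	int idx;
	struct my_hba *hba;
};

struct my_hba {
	struct my_eq_hdl eq_hdl[MY_MAX_VEC];
	int vector[MY_MAX_VEC];		/* Linux IRQ numbers per vector */
};

irqreturn_t my_hba_intr_handler(int irq, void *dev_id);

static int my_setup_vectors(struct my_hba *h, int nvec)
{
	int i, rc;

	for (i = 0; i < nvec; i++) {
		h->eq_hdl[i].idx = i;
		h->eq_hdl[i].hba = h;
		rc = request_irq(h->vector[i], my_hba_intr_handler, 0,
				 "my-hba", &h->eq_hdl[i]);
		if (rc)
			goto unwind;
	}
	return 0;
unwind:
	while (--i >= 0)
		free_irq(h->vector[i], &h->eq_hdl[i]);
	return rc;
}
#endif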
15562
15563 /**
15564 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15565 * @irq: Interrupt number.
15566 * @dev_id: The device context pointer.
15567 *
15568 * This function is the device-level interrupt handler to device with SLI-4
15569 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15570 * interrupt mode is enabled and there is an event in the HBA which requires
15571 * driver attention. This function invokes the slow-path interrupt attention
15572 * handling function and fast-path interrupt attention handling function in
15573 * turn to process the relevant HBA attention events. This function is called
15574 * without any lock held. It gets the hbalock to access and update SLI data
15575 * structures.
15576 *
15577 * This function returns IRQ_HANDLED when interrupt is handled, else it
15578 * returns IRQ_NONE.
15579 **/
15580 irqreturn_t
15581 lpfc_sli4_intr_handler(int irq, void *dev_id)
15582 {
15583 struct lpfc_hba *phba;
15584 irqreturn_t hba_irq_rc;
15585 bool hba_handled = false;
15586 int qidx;
15587
15588 /* Get the driver's phba structure from the dev_id */
15589 phba = (struct lpfc_hba *)dev_id;
15590
15591 if (unlikely(!phba))
15592 return IRQ_NONE;
15593
15594 /*
15595 * Invoke fast-path host attention interrupt handling as appropriate.
15596 */
15597 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15598 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15599 &phba->sli4_hba.hba_eq_hdl[qidx]);
15600 if (hba_irq_rc == IRQ_HANDLED)
15601 			hba_handled = true;
15602 }
15603
15604 	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15605 } /* lpfc_sli4_intr_handler */
15606
15607 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15608 {
15609 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15610 struct lpfc_queue *eq;
15611 int i = 0;
15612
15613 rcu_read_lock();
15614
15615 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15616 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15617 if (!list_empty(&phba->poll_list))
15618 mod_timer(&phba->cpuhp_poll_timer,
15619 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15620
15621 rcu_read_unlock();
15622 }
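
/*
 * Editor's note: a hedged sketch (not driver code) of the self-rearming
 * heartbeat above: walk an RCU-protected list under rcu_read_lock() and
 * re-arm the timer only while the list is non-empty, so the timer dies out
 * once the last EQ leaves polling mode. MY_POLL_MS and the my_* names are
 * hypothetical.
 */
#if 0
#include <linux/timer.h>
#include <linux/rculist.h>

#define MY_POLL_MS 1

struct my_eq { struct list_head entry; };

struct my_hba {
	struct timer_list poll_timer;
	struct list_head poll_list;
};

void my_poll_eq(struct my_eq *eq);

static void my_poll_timer_fn(struct timer_list *t)
{
	struct my_hba *h = from_timer(h, t, poll_timer);
	struct my_eq *eq;

	rcu_read_lock();
	list_for_each_entry_rcu(eq, &h->poll_list, entry)
		my_poll_eq(eq);
	if (!list_empty(&h->poll_list))
		mod_timer(&h->poll_timer,
			  jiffies + msecs_to_jiffies(MY_POLL_MS));
	rcu_read_unlock();
}
#endif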
15623
15624 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15625 {
15626 struct lpfc_hba *phba = eq->phba;
15627 int i = 0;
15628
15629 	/*
15630 	 * Unlocking an irq is one of the entry points to check
15631 	 * for a reschedule, but we are fine on the io submission
15632 	 * path as the midlayer does a get_cpu to glue us in. Flush
15633 	 * out the invalidation queue so we can see the updated
15634 	 * value of the mode flag.
15635 	 */
15636 smp_rmb();
15637
15638 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15639 		/* We will likely not get the completion for the caller
15640 		 * during this iteration, but that's fine.
15641 		 * Future io's coming on this eq should be able to
15642 		 * pick it up. Single io's will be handled through a
15643 		 * reschedule from the polling timer function, which is
15644 		 * currently triggered every 1 msec.
15645 		 */
15646 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15647
15648 return i;
15649 }
15650
15651 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15652 {
15653 struct lpfc_hba *phba = eq->phba;
15654
15655 /* kickstart slowpath processing if needed */
15656 if (list_empty(&phba->poll_list))
15657 mod_timer(&phba->cpuhp_poll_timer,
15658 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15659
15660 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15661 synchronize_rcu();
15662 }
15663
15664 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15665 {
15666 struct lpfc_hba *phba = eq->phba;
15667
15668 	/* Disable slow-path processing for this eq. Kick-start the eq
15669 	 * by re-arming it ASAP.
15670 	 */
15671 list_del_rcu(&eq->_poll_list);
15672 synchronize_rcu();
15673
15674 if (list_empty(&phba->poll_list))
15675 del_timer_sync(&phba->cpuhp_poll_timer);
15676 }
15677
15678 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15679 {
15680 struct lpfc_queue *eq, *next;
15681
15682 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15683 list_del(&eq->_poll_list);
15684
15685 INIT_LIST_HEAD(&phba->poll_list);
15686 synchronize_rcu();
15687 }
15688
15689 static inline void
15690 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15691 {
15692 if (mode == eq->mode)
15693 return;
15694 	/*
15695 	 * Currently this function is only called during a hotplug
15696 	 * event and the cpu on which this function is executing
15697 	 * is going offline. By now the hotplug code has instructed
15698 	 * the scheduler to remove this cpu from the cpu active mask,
15699 	 * so we don't need to worry about being put aside by the
15700 	 * scheduler for a high-priority process. Yes, interrupts
15701 	 * could still arrive, but they are known to retire ASAP.
15702 	 */
15703
15704 /* Disable polling in the fastpath */
15705 WRITE_ONCE(eq->mode, mode);
15706 /* flush out the store buffer */
15707 smp_wmb();
15708
15709 	/*
15710 	 * Add this eq to the polling list and start polling. For
15711 	 * a grace period both the interrupt handler and the poller
15712 	 * will try to process the eq _but_ that's fine. We have a
15713 	 * synchronization mechanism in place (queue_claimed) to
15714 	 * deal with it. This is just a draining phase for the
15715 	 * interrupt handler (not the eq's), as we have guaranteed
15716 	 * through the barrier that all the CPUs have seen the new
15717 	 * CQ_POLLED state, which effectively disables REARMING of
15718 	 * the EQ. The whole idea is that the eq's die off eventually,
15719 	 * as we are not rearming EQ's anymore.
15720 	 */
15721 mode ? lpfc_sli4_add_to_poll_list(eq) :
15722 lpfc_sli4_remove_from_poll_list(eq);
15723 }
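
/*
 * Editor's note: a hedged sketch (not driver code) of the publish/observe
 * pairing above. The writer publishes the new mode with WRITE_ONCE() and a
 * write barrier; the reader (see lpfc_sli4_poll_eq above) pairs that with
 * smp_rmb() before READ_ONCE(). my_eq is hypothetical.
 */
#if 0
#include <asm/barrier.h>
#include <linux/types.h>

struct my_eq { u8 mode; };

static void my_set_mode(struct my_eq *eq, u8 mode)	/* writer side */
{
	WRITE_ONCE(eq->mode, mode);
	smp_wmb();	/* make the new mode visible before racing paths run */
}

static u8 my_get_mode(struct my_eq *eq)			/* reader side */
{
	smp_rmb();	/* pairs with the smp_wmb() above */
	return READ_ONCE(eq->mode);
}
#endif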
15724
15725 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15726 {
15727 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15728 }
15729
15730 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15731 {
15732 struct lpfc_hba *phba = eq->phba;
15733
15734 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15735
15736 	/* Kick-start the pending io's in h/w.
15737 	 * Once we switch back to interrupt processing on an eq,
15738 	 * the io completion path will only arm the eq when it
15739 	 * receives a completion. But since the eq is in a
15740 	 * disarmed state, it doesn't receive a completion. This
15741 	 * creates a deadlock scenario.
15742 	 */
15743 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15744 }
15745
15746 /**
15747 * lpfc_sli4_queue_free - free a queue structure and associated memory
15748 * @queue: The queue structure to free.
15749 *
15750 * This function frees a queue structure and the DMAable memory used for
15751 * the host resident queue. This function must be called after destroying the
15752 * queue on the HBA.
15753 **/
15754 void
15755 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15756 {
15757 struct lpfc_dmabuf *dmabuf;
15758
15759 if (!queue)
15760 return;
15761
15762 if (!list_empty(&queue->wq_list))
15763 list_del(&queue->wq_list);
15764
15765 while (!list_empty(&queue->page_list)) {
15766 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15767 list);
15768 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15769 dmabuf->virt, dmabuf->phys);
15770 kfree(dmabuf);
15771 }
15772 if (queue->rqbp) {
15773 lpfc_free_rq_buffer(queue->phba, queue);
15774 kfree(queue->rqbp);
15775 }
15776
15777 if (!list_empty(&queue->cpu_list))
15778 list_del(&queue->cpu_list);
15779
15780 kfree(queue);
15781 return;
15782 }
15783
15784 /**
15785 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15786 * @phba: The HBA that this queue is being created on.
15787 * @page_size: The size of a queue page
15788 * @entry_size: The size of each queue entry for this queue.
15789 * @entry_count: The number of entries that this queue will handle.
15790 * @cpu: The cpu that will primarily utilize this queue.
15791 *
15792 * This function allocates a queue structure and the DMAable memory used for
15793 * the host resident queue. This function must be called before creating the
15794 * queue on the HBA.
15795 **/
15796 struct lpfc_queue *
15797 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15798 uint32_t entry_size, uint32_t entry_count, int cpu)
15799 {
15800 struct lpfc_queue *queue;
15801 struct lpfc_dmabuf *dmabuf;
15802 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15803 uint16_t x, pgcnt;
15804
15805 if (!phba->sli4_hba.pc_sli4_params.supported)
15806 hw_page_size = page_size;
15807
15808 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15809
15810 	/* If needed, adjust the page count to match the max the adapter supports */
15811 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15812 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15813
15814 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15815 GFP_KERNEL, cpu_to_node(cpu));
15816 if (!queue)
15817 return NULL;
15818
15819 INIT_LIST_HEAD(&queue->list);
15820 INIT_LIST_HEAD(&queue->_poll_list);
15821 INIT_LIST_HEAD(&queue->wq_list);
15822 INIT_LIST_HEAD(&queue->wqfull_list);
15823 INIT_LIST_HEAD(&queue->page_list);
15824 INIT_LIST_HEAD(&queue->child_list);
15825 INIT_LIST_HEAD(&queue->cpu_list);
15826
15827 /* Set queue parameters now. If the system cannot provide memory
15828 * resources, the free routine needs to know what was allocated.
15829 */
15830 queue->page_count = pgcnt;
15831 queue->q_pgs = (void **)&queue[1];
15832 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15833 queue->entry_size = entry_size;
15834 queue->entry_count = entry_count;
15835 queue->page_size = hw_page_size;
15836 queue->phba = phba;
15837
15838 for (x = 0; x < queue->page_count; x++) {
15839 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15840 dev_to_node(&phba->pcidev->dev));
15841 if (!dmabuf)
15842 goto out_fail;
15843 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15844 hw_page_size, &dmabuf->phys,
15845 GFP_KERNEL);
15846 if (!dmabuf->virt) {
15847 kfree(dmabuf);
15848 goto out_fail;
15849 }
15850 dmabuf->buffer_tag = x;
15851 list_add_tail(&dmabuf->list, &queue->page_list);
15852 		/* use lpfc_sli4_qe to index a particular entry in this page */
15853 queue->q_pgs[x] = dmabuf->virt;
15854 }
15855 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15856 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15857 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15858 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15859
15860 /* notify_interval will be set during q creation */
15861
15862 return queue;
15863 out_fail:
15864 lpfc_sli4_queue_free(queue);
15865 return NULL;
15866 }
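
/*
 * Editor's note: a hedged usage sketch (not driver code) for the two
 * routines above. The size/count values are illustrative only; real callers
 * use the LPFC_* sizing macros. lpfc_sli4_queue_free() handles the partial
 * allocations left behind on the out_fail path.
 */
#if 0
static struct lpfc_queue *my_make_queue(struct lpfc_hba *phba, int cpu)
{
	struct lpfc_queue *q;

	/* 4 KB pages, 16-byte entries, 1024 entries, homed on 'cpu' */
	q = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 16, 1024, cpu);
	if (!q)
		return NULL;	/* allocation failed */
	/* ... a *_create mailbox call (e.g. lpfc_eq_create) follows ... */
	return q;
}
#endif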
15867
15868 /**
15869 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15870 * @phba: HBA structure that indicates port to create a queue on.
15871 * @pci_barset: PCI BAR set flag.
15872 *
15873  * This function returns the host memory address to which the specified
15874  * PCI BAR set has been iomapped, if any. The returned host memory address
15875  * can be NULL.
15876 */
15877 static void __iomem *
15878 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15879 {
15880 if (!phba->pcidev)
15881 return NULL;
15882
15883 switch (pci_barset) {
15884 case WQ_PCI_BAR_0_AND_1:
15885 return phba->pci_bar0_memmap_p;
15886 case WQ_PCI_BAR_2_AND_3:
15887 return phba->pci_bar2_memmap_p;
15888 case WQ_PCI_BAR_4_AND_5:
15889 return phba->pci_bar4_memmap_p;
15890 default:
15891 break;
15892 }
15893 return NULL;
15894 }
15895
15896 /**
15897 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15898 * @phba: HBA structure that EQs are on.
15899 * @startq: The starting EQ index to modify
15900 * @numq: The number of EQs (consecutive indexes) to modify
15901 * @usdelay: amount of delay
15902 *
15903 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15904 * is set either by writing to a register (if supported by the SLI Port)
15905 * or by mailbox command. The mailbox command allows several EQs to be
15906 * updated at once.
15907 *
15908 * The @phba struct is used to send a mailbox command to HBA. The @startq
15909 * is used to get the starting EQ index to change. The @numq value is
15910 * used to specify how many consecutive EQ indexes, starting at EQ index,
15911  * are to be changed. This function is synchronous and waits for any
15912  * mailbox command to finish before returning.
15913  *
15914  * This function returns no value. If memory cannot be allocated, or if a
15915  * mailbox command fails, an error is logged and the routine returns; in the
15916  * mailbox-failure case, some EQs may already have had their delay
15917  * multiplier changed.
15918 **/
15919 void
15920 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15921 uint32_t numq, uint32_t usdelay)
15922 {
15923 struct lpfc_mbx_modify_eq_delay *eq_delay;
15924 LPFC_MBOXQ_t *mbox;
15925 struct lpfc_queue *eq;
15926 int cnt = 0, rc, length;
15927 uint32_t shdr_status, shdr_add_status;
15928 uint32_t dmult;
15929 int qidx;
15930 union lpfc_sli4_cfg_shdr *shdr;
15931
15932 if (startq >= phba->cfg_irq_chann)
15933 return;
15934
15935 if (usdelay > 0xFFFF) {
15936 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15937 "6429 usdelay %d too large. Scaled down to "
15938 "0xFFFF.\n", usdelay);
15939 usdelay = 0xFFFF;
15940 }
15941
15942 /* set values by EQ_DELAY register if supported */
15943 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15944 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15945 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15946 if (!eq)
15947 continue;
15948
15949 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15950
15951 if (++cnt >= numq)
15952 break;
15953 }
15954 return;
15955 }
15956
15957 /* Otherwise, set values by mailbox cmd */
15958
15959 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15960 if (!mbox) {
15961 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15962 "6428 Failed allocating mailbox cmd buffer."
15963 " EQ delay was not set.\n");
15964 return;
15965 }
15966 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15967 sizeof(struct lpfc_sli4_cfg_mhdr));
15968 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15969 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15970 length, LPFC_SLI4_MBX_EMBED);
15971 eq_delay = &mbox->u.mqe.un.eq_delay;
15972
15973 	/* Calculate the delay multiplier from the maximum interrupts per second */
15974 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15975 if (dmult)
15976 dmult--;
15977 if (dmult > LPFC_DMULT_MAX)
15978 dmult = LPFC_DMULT_MAX;
15979
15980 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15981 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15982 if (!eq)
15983 continue;
15984 eq->q_mode = usdelay;
15985 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15986 eq_delay->u.request.eq[cnt].phase = 0;
15987 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15988
15989 if (++cnt >= numq)
15990 break;
15991 }
15992 eq_delay->u.request.num_eq = cnt;
15993
15994 mbox->vport = phba->pport;
15995 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15996 mbox->ctx_ndlp = NULL;
15997 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15998 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15999 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16000 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16001 if (shdr_status || shdr_add_status || rc) {
16002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16003 "2512 MODIFY_EQ_DELAY mailbox failed with "
16004 "status x%x add_status x%x, mbx status x%x\n",
16005 shdr_status, shdr_add_status, rc);
16006 }
16007 mempool_free(mbox, phba->mbox_mem_pool);
16008 return;
16009 }
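
/*
 * Editor's note: a hedged sketch (not driver code) isolating the
 * delay-multiplier arithmetic above, in terms of the driver's own macros:
 * scale usdelay into a multiplier, decrement by one as the routine above
 * does, and clamp to LPFC_DMULT_MAX.
 */
#if 0
static uint32_t my_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;

	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;
	return dmult;
}
#endif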
16010
16011 /**
16012 * lpfc_eq_create - Create an Event Queue on the HBA
16013 * @phba: HBA structure that indicates port to create a queue on.
16014 * @eq: The queue structure to use to create the event queue.
16015 * @imax: The maximum interrupt per second limit.
16016 *
16017 * This function creates an event queue, as detailed in @eq, on a port,
16018 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16019 *
16020 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16021 * is used to get the entry count and entry size that are necessary to
16022 * determine the number of pages to allocate and use for this queue. This
16023 * function will send the EQ_CREATE mailbox command to the HBA to setup the
16024  * event queue. This function is synchronous and waits for the mailbox
16025 * command to finish before continuing.
16026 *
16027 * On success this function will return a zero. If unable to allocate enough
16028 * memory this function will return -ENOMEM. If the queue create mailbox command
16029 * fails this function will return -ENXIO.
16030 **/
16031 int
16032 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16033 {
16034 struct lpfc_mbx_eq_create *eq_create;
16035 LPFC_MBOXQ_t *mbox;
16036 int rc, length, status = 0;
16037 struct lpfc_dmabuf *dmabuf;
16038 uint32_t shdr_status, shdr_add_status;
16039 union lpfc_sli4_cfg_shdr *shdr;
16040 uint16_t dmult;
16041 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16042
16043 /* sanity check on queue memory */
16044 if (!eq)
16045 return -ENODEV;
16046 if (!phba->sli4_hba.pc_sli4_params.supported)
16047 hw_page_size = SLI4_PAGE_SIZE;
16048
16049 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16050 if (!mbox)
16051 return -ENOMEM;
16052 length = (sizeof(struct lpfc_mbx_eq_create) -
16053 sizeof(struct lpfc_sli4_cfg_mhdr));
16054 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16055 LPFC_MBOX_OPCODE_EQ_CREATE,
16056 length, LPFC_SLI4_MBX_EMBED);
16057 eq_create = &mbox->u.mqe.un.eq_create;
16058 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16059 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16060 eq->page_count);
16061 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16062 LPFC_EQE_SIZE);
16063 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16064
16065 /* Use version 2 of CREATE_EQ if eqav is set */
16066 if (phba->sli4_hba.pc_sli4_params.eqav) {
16067 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16068 LPFC_Q_CREATE_VERSION_2);
16069 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16070 phba->sli4_hba.pc_sli4_params.eqav);
16071 }
16072
16073 /* don't setup delay multiplier using EQ_CREATE */
16074 dmult = 0;
16075 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16076 dmult);
16077 switch (eq->entry_count) {
16078 default:
16079 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16080 "0360 Unsupported EQ count. (%d)\n",
16081 eq->entry_count);
16082 if (eq->entry_count < 256) {
16083 status = -EINVAL;
16084 goto out;
16085 }
16086 fallthrough; /* otherwise default to smallest count */
16087 case 256:
16088 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16089 LPFC_EQ_CNT_256);
16090 break;
16091 case 512:
16092 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16093 LPFC_EQ_CNT_512);
16094 break;
16095 case 1024:
16096 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16097 LPFC_EQ_CNT_1024);
16098 break;
16099 case 2048:
16100 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16101 LPFC_EQ_CNT_2048);
16102 break;
16103 case 4096:
16104 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16105 LPFC_EQ_CNT_4096);
16106 break;
16107 }
16108 list_for_each_entry(dmabuf, &eq->page_list, list) {
16109 memset(dmabuf->virt, 0, hw_page_size);
16110 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16111 putPaddrLow(dmabuf->phys);
16112 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16113 putPaddrHigh(dmabuf->phys);
16114 }
16115 mbox->vport = phba->pport;
16116 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16117 mbox->ctx_buf = NULL;
16118 mbox->ctx_ndlp = NULL;
16119 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16120 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16121 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16122 if (shdr_status || shdr_add_status || rc) {
16123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16124 "2500 EQ_CREATE mailbox failed with "
16125 "status x%x add_status x%x, mbx status x%x\n",
16126 shdr_status, shdr_add_status, rc);
16127 status = -ENXIO;
16128 }
16129 eq->type = LPFC_EQ;
16130 eq->subtype = LPFC_NONE;
16131 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16132 if (eq->queue_id == 0xFFFF)
16133 status = -ENXIO;
16134 eq->host_index = 0;
16135 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16136 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16137 out:
16138 mempool_free(mbox, phba->mbox_mem_pool);
16139 return status;
16140 }
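
/*
 * Editor's note: a hedged sketch (not driver code) of the mailbox
 * completion-status idiom that lpfc_eq_create() and the queue-create
 * routines below all repeat: issue the mailbox in polled mode, then check
 * the driver return code and both status fields embedded in the config
 * sub-header before declaring success.
 */
#if 0
static int my_issue_cfg_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
			     union lpfc_sli4_cfg_shdr *shdr)
{
	uint32_t shdr_status, shdr_add_status;
	int rc;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc)
		return -ENXIO;	/* any of the three indicates failure */
	return 0;
}
#endif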
16141
16142 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
16143 {
16144 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
16145
16146 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
16147
16148 return 1;
16149 }
16150
16151 /**
16152 * lpfc_cq_create - Create a Completion Queue on the HBA
16153 * @phba: HBA structure that indicates port to create a queue on.
16154 * @cq: The queue structure to use to create the completion queue.
16155 * @eq: The event queue to bind this completion queue to.
16156 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16157 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16158 *
16159  * This function creates a completion queue, as detailed in @cq, on a port,
16160 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16161 *
16162 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16163 * is used to get the entry count and entry size that are necessary to
16164 * determine the number of pages to allocate and use for this queue. The @eq
16165 * is used to indicate which event queue to bind this completion queue to. This
16166 * function will send the CQ_CREATE mailbox command to the HBA to setup the
16167  * completion queue. This function is synchronous and waits for the mailbox
16168 * command to finish before continuing.
16169 *
16170 * On success this function will return a zero. If unable to allocate enough
16171 * memory this function will return -ENOMEM. If the queue create mailbox command
16172 * fails this function will return -ENXIO.
16173 **/
16174 int
16175 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16176 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16177 {
16178 struct lpfc_mbx_cq_create *cq_create;
16179 struct lpfc_dmabuf *dmabuf;
16180 LPFC_MBOXQ_t *mbox;
16181 int rc, length, status = 0;
16182 uint32_t shdr_status, shdr_add_status;
16183 union lpfc_sli4_cfg_shdr *shdr;
16184
16185 /* sanity check on queue memory */
16186 if (!cq || !eq)
16187 return -ENODEV;
16188
16189 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16190 if (!mbox)
16191 return -ENOMEM;
16192 length = (sizeof(struct lpfc_mbx_cq_create) -
16193 sizeof(struct lpfc_sli4_cfg_mhdr));
16194 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16195 LPFC_MBOX_OPCODE_CQ_CREATE,
16196 length, LPFC_SLI4_MBX_EMBED);
16197 cq_create = &mbox->u.mqe.un.cq_create;
16198 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16199 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16200 cq->page_count);
16201 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16202 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16203 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16204 phba->sli4_hba.pc_sli4_params.cqv);
16205 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16206 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16207 (cq->page_size / SLI4_PAGE_SIZE));
16208 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16209 eq->queue_id);
16210 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16211 phba->sli4_hba.pc_sli4_params.cqav);
16212 } else {
16213 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16214 eq->queue_id);
16215 }
16216 switch (cq->entry_count) {
16217 case 2048:
16218 case 4096:
16219 if (phba->sli4_hba.pc_sli4_params.cqv ==
16220 LPFC_Q_CREATE_VERSION_2) {
16221 cq_create->u.request.context.lpfc_cq_context_count =
16222 cq->entry_count;
16223 bf_set(lpfc_cq_context_count,
16224 &cq_create->u.request.context,
16225 LPFC_CQ_CNT_WORD7);
16226 break;
16227 }
16228 fallthrough;
16229 default:
16230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16231 "0361 Unsupported CQ count: "
16232 "entry cnt %d sz %d pg cnt %d\n",
16233 cq->entry_count, cq->entry_size,
16234 cq->page_count);
16235 if (cq->entry_count < 256) {
16236 status = -EINVAL;
16237 goto out;
16238 }
16239 fallthrough; /* otherwise default to smallest count */
16240 case 256:
16241 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16242 LPFC_CQ_CNT_256);
16243 break;
16244 case 512:
16245 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16246 LPFC_CQ_CNT_512);
16247 break;
16248 case 1024:
16249 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16250 LPFC_CQ_CNT_1024);
16251 break;
16252 }
16253 list_for_each_entry(dmabuf, &cq->page_list, list) {
16254 memset(dmabuf->virt, 0, cq->page_size);
16255 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16256 putPaddrLow(dmabuf->phys);
16257 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16258 putPaddrHigh(dmabuf->phys);
16259 }
16260 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16261
16262 /* The IOCTL status is embedded in the mailbox subheader. */
16263 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16264 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16265 if (shdr_status || shdr_add_status || rc) {
16266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16267 "2501 CQ_CREATE mailbox failed with "
16268 "status x%x add_status x%x, mbx status x%x\n",
16269 shdr_status, shdr_add_status, rc);
16270 status = -ENXIO;
16271 goto out;
16272 }
16273 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16274 if (cq->queue_id == 0xFFFF) {
16275 status = -ENXIO;
16276 goto out;
16277 }
16278 /* link the cq onto the parent eq child list */
16279 list_add_tail(&cq->list, &eq->child_list);
16280 /* Set up completion queue's type and subtype */
16281 cq->type = type;
16282 cq->subtype = subtype;
16284 cq->assoc_qid = eq->queue_id;
16285 cq->assoc_qp = eq;
16286 cq->host_index = 0;
16287 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16288 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16289
16290 if (cq->queue_id > phba->sli4_hba.cq_max)
16291 phba->sli4_hba.cq_max = cq->queue_id;
16292
16293 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
16294 out:
16295 mempool_free(mbox, phba->mbox_mem_pool);
16296 return status;
16297 }
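
/*
 * Editor's usage sketch (hypothetical, not part of the driver): a caller
 * that has already allocated @cq and @eq with lpfc_sli4_queue_alloc()
 * would typically bind a fast-path WCQ to its EQ like this:
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
 *	if (rc)
 *		return rc;
 *
 * A nonzero rc here is -ENODEV, -ENOMEM or -ENXIO, per the kernel-doc
 * above.
 */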
16298
16299 /**
16300 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16301 * @phba: HBA structure that indicates port to create a queue on.
16302 * @cqp: The queue structure array to use to create the completion queues.
16303 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16304 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16305 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16306 *
16307 * This function creates a set of completion queues to support MRQ, as
16308 * detailed in @cqp, on a port described by @phba, by sending a
16309 * CREATE_CQ_SET mailbox command to the HBA.
16310 *
16311 * The @phba struct is used to send mailbox commands to the HBA. The @cqp
16312 * array is used to get the entry count and entry size that are necessary
16313 * to determine the number of pages to allocate and use for each queue. The
16314 * EQs in @hdwq indicate which event queue to bind each completion queue to.
16315 * This function will send the CREATE_CQ_SET mailbox command to the HBA to
16316 * set up the completion queues, and it waits synchronously for the mailbox
16317 * command to finish before continuing.
16318 *
16319 * On success this function will return a zero. If unable to allocate enough
16320 * memory this function will return -ENOMEM. If the queue create mailbox command
16321 * fails this function will return -ENXIO.
16322 **/
16323 int
16324 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16325 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16326 uint32_t subtype)
16327 {
16328 struct lpfc_queue *cq;
16329 struct lpfc_queue *eq;
16330 struct lpfc_mbx_cq_create_set *cq_set;
16331 struct lpfc_dmabuf *dmabuf;
16332 LPFC_MBOXQ_t *mbox;
16333 int rc, length, alloclen, status = 0;
16334 int cnt, idx, numcq, page_idx = 0;
16335 uint32_t shdr_status, shdr_add_status;
16336 union lpfc_sli4_cfg_shdr *shdr;
16337 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16338
16339 /* sanity check on queue memory */
16340 numcq = phba->cfg_nvmet_mrq;
16341 if (!cqp || !hdwq || !numcq)
16342 return -ENODEV;
16343
16344 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16345 if (!mbox)
16346 return -ENOMEM;
16347
16348 length = sizeof(struct lpfc_mbx_cq_create_set);
16349 length += ((numcq * cqp[0]->page_count) *
16350 sizeof(struct dma_address));
16351 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16352 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16353 LPFC_SLI4_MBX_NEMBED);
16354 if (alloclen < length) {
16355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16356 "3098 Allocated DMA memory size (%d) is "
16357 "less than the requested DMA memory size "
16358 "(%d)\n", alloclen, length);
16359 status = -ENOMEM;
16360 goto out;
16361 }
16362 cq_set = mbox->sge_array->addr[0];
16363 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16364 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16365
16366 for (idx = 0; idx < numcq; idx++) {
16367 cq = cqp[idx];
16368 eq = hdwq[idx].hba_eq;
16369 if (!cq || !eq) {
16370 status = -ENOMEM;
16371 goto out;
16372 }
16373 if (!phba->sli4_hba.pc_sli4_params.supported)
16374 hw_page_size = cq->page_size;
16375
16376 switch (idx) {
16377 case 0:
16378 bf_set(lpfc_mbx_cq_create_set_page_size,
16379 &cq_set->u.request,
16380 (hw_page_size / SLI4_PAGE_SIZE));
16381 bf_set(lpfc_mbx_cq_create_set_num_pages,
16382 &cq_set->u.request, cq->page_count);
16383 bf_set(lpfc_mbx_cq_create_set_evt,
16384 &cq_set->u.request, 1);
16385 bf_set(lpfc_mbx_cq_create_set_valid,
16386 &cq_set->u.request, 1);
16387 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16388 &cq_set->u.request, 0);
16389 bf_set(lpfc_mbx_cq_create_set_num_cq,
16390 &cq_set->u.request, numcq);
16391 bf_set(lpfc_mbx_cq_create_set_autovalid,
16392 &cq_set->u.request,
16393 phba->sli4_hba.pc_sli4_params.cqav);
16394 switch (cq->entry_count) {
16395 case 2048:
16396 case 4096:
16397 if (phba->sli4_hba.pc_sli4_params.cqv ==
16398 LPFC_Q_CREATE_VERSION_2) {
16399 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16400 &cq_set->u.request,
16401 cq->entry_count);
16402 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16403 &cq_set->u.request,
16404 LPFC_CQ_CNT_WORD7);
16405 break;
16406 }
16407 fallthrough;
16408 default:
16409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16410 "3118 Bad CQ count. (%d)\n",
16411 cq->entry_count);
16412 if (cq->entry_count < 256) {
16413 status = -EINVAL;
16414 goto out;
16415 }
16416 fallthrough; /* otherwise default to smallest */
16417 case 256:
16418 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16419 &cq_set->u.request, LPFC_CQ_CNT_256);
16420 break;
16421 case 512:
16422 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16423 &cq_set->u.request, LPFC_CQ_CNT_512);
16424 break;
16425 case 1024:
16426 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16427 &cq_set->u.request, LPFC_CQ_CNT_1024);
16428 break;
16429 }
16430 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16431 &cq_set->u.request, eq->queue_id);
16432 break;
16433 case 1:
16434 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16435 &cq_set->u.request, eq->queue_id);
16436 break;
16437 case 2:
16438 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16439 &cq_set->u.request, eq->queue_id);
16440 break;
16441 case 3:
16442 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16443 &cq_set->u.request, eq->queue_id);
16444 break;
16445 case 4:
16446 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16447 &cq_set->u.request, eq->queue_id);
16448 break;
16449 case 5:
16450 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16451 &cq_set->u.request, eq->queue_id);
16452 break;
16453 case 6:
16454 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16455 &cq_set->u.request, eq->queue_id);
16456 break;
16457 case 7:
16458 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16459 &cq_set->u.request, eq->queue_id);
16460 break;
16461 case 8:
16462 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16463 &cq_set->u.request, eq->queue_id);
16464 break;
16465 case 9:
16466 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16467 &cq_set->u.request, eq->queue_id);
16468 break;
16469 case 10:
16470 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16471 &cq_set->u.request, eq->queue_id);
16472 break;
16473 case 11:
16474 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16475 &cq_set->u.request, eq->queue_id);
16476 break;
16477 case 12:
16478 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16479 &cq_set->u.request, eq->queue_id);
16480 break;
16481 case 13:
16482 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16483 &cq_set->u.request, eq->queue_id);
16484 break;
16485 case 14:
16486 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16487 &cq_set->u.request, eq->queue_id);
16488 break;
16489 case 15:
16490 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16491 &cq_set->u.request, eq->queue_id);
16492 break;
16493 }
16494
16495 /* link the cq onto the parent eq child list */
16496 list_add_tail(&cq->list, &eq->child_list);
16497 /* Set up completion queue's type and subtype */
16498 cq->type = type;
16499 cq->subtype = subtype;
16500 cq->assoc_qid = eq->queue_id;
16501 cq->assoc_qp = eq;
16502 cq->host_index = 0;
16503 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16504 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16505 cq->entry_count);
16506 cq->chann = idx;
16507
16508 rc = 0;
16509 list_for_each_entry(dmabuf, &cq->page_list, list) {
16510 memset(dmabuf->virt, 0, hw_page_size);
16511 cnt = page_idx + dmabuf->buffer_tag;
16512 cq_set->u.request.page[cnt].addr_lo =
16513 putPaddrLow(dmabuf->phys);
16514 cq_set->u.request.page[cnt].addr_hi =
16515 putPaddrHigh(dmabuf->phys);
16516 rc++;
16517 }
16518 page_idx += rc;
16519 }
16520
16521 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16522
16523 /* The IOCTL status is embedded in the mailbox subheader. */
16524 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16525 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16526 if (shdr_status || shdr_add_status || rc) {
16527 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16528 "3119 CQ_CREATE_SET mailbox failed with "
16529 "status x%x add_status x%x, mbx status x%x\n",
16530 shdr_status, shdr_add_status, rc);
16531 status = -ENXIO;
16532 goto out;
16533 }
16534 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16535 if (rc == 0xFFFF) {
16536 status = -ENXIO;
16537 goto out;
16538 }
16539
16540 for (idx = 0; idx < numcq; idx++) {
16541 cq = cqp[idx];
16542 cq->queue_id = rc + idx;
16543 if (cq->queue_id > phba->sli4_hba.cq_max)
16544 phba->sli4_hba.cq_max = cq->queue_id;
16545 }
16546
16547 out:
16548 lpfc_sli4_mbox_cmd_free(phba, mbox);
16549 return status;
16550 }
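
/*
 * Editor's usage sketch (hypothetical, assuming the NVMET CQ-set members
 * declared in lpfc_sli4.h): MRQ setup creates one CQ per hardware queue
 * and binds each to the matching EQ in a single CREATE_CQ_SET command:
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
 *
 * On success, cqp[idx]->queue_id is the returned base id plus idx, as
 * assigned in the loop above.
 */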
16551
16552 /**
16553 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16554 * @phba: HBA structure that indicates port to create a queue on.
16555 * @mq: The queue structure to use to create the mailbox queue.
16556 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16557 * @cq: The completion queue to associate with this mailbox queue.
16558 *
16559 * This function provides fallback (fb) functionality when the
16560 * mq_create_ext fails on older FW generations. Its purpose is identical
16561 * to mq_create_ext otherwise.
16562 *
16563 * This routine cannot fail as all attributes were previously accessed and
16564 * initialized in mq_create_ext.
16565 **/
16566 static void
16567 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16568 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16569 {
16570 struct lpfc_mbx_mq_create *mq_create;
16571 struct lpfc_dmabuf *dmabuf;
16572 int length;
16573
16574 length = (sizeof(struct lpfc_mbx_mq_create) -
16575 sizeof(struct lpfc_sli4_cfg_mhdr));
16576 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16577 LPFC_MBOX_OPCODE_MQ_CREATE,
16578 length, LPFC_SLI4_MBX_EMBED);
16579 mq_create = &mbox->u.mqe.un.mq_create;
16580 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16581 mq->page_count);
16582 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16583 cq->queue_id);
16584 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16585 switch (mq->entry_count) {
16586 case 16:
16587 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16588 LPFC_MQ_RING_SIZE_16);
16589 break;
16590 case 32:
16591 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16592 LPFC_MQ_RING_SIZE_32);
16593 break;
16594 case 64:
16595 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16596 LPFC_MQ_RING_SIZE_64);
16597 break;
16598 case 128:
16599 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16600 LPFC_MQ_RING_SIZE_128);
16601 break;
16602 }
16603 list_for_each_entry(dmabuf, &mq->page_list, list) {
16604 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16605 putPaddrLow(dmabuf->phys);
16606 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16607 putPaddrHigh(dmabuf->phys);
16608 }
16609 }
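
/*
 * Editor's note, with a minimal sketch: this helper only rewrites the
 * already-allocated mailbox payload. The caller (lpfc_mq_create() below)
 * re-issues the command when MQ_CREATE_EXT is rejected by older firmware:
 *
 *	lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 */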
16610
16611 /**
16612 * lpfc_mq_create - Create a mailbox Queue on the HBA
16613 * @phba: HBA structure that indicates port to create a queue on.
16614 * @mq: The queue structure to use to create the mailbox queue.
16615 * @cq: The completion queue to associate with this mailbox queue.
16616 * @subtype: The queue's subtype.
16617 *
16618 * This function creates a mailbox queue, as detailed in @mq, on a port,
16619 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16620 *
16621 * The @phba struct is used to send mailbox commands to the HBA. The @mq
16622 * struct is used to get the entry count and entry size that are necessary
16623 * to determine the number of pages to allocate and use for this queue. This
16624 * function will send the MQ_CREATE mailbox command to the HBA to set up the
16625 * mailbox queue, and it waits synchronously for the mailbox
16626 * command to finish before continuing.
16627 *
16628 * On success this function will return a zero. If unable to allocate enough
16629 * memory this function will return -ENOMEM. If the queue create mailbox command
16630 * fails this function will return -ENXIO.
16631 **/
16632 int32_t
16633 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16634 struct lpfc_queue *cq, uint32_t subtype)
16635 {
16636 struct lpfc_mbx_mq_create *mq_create;
16637 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16638 struct lpfc_dmabuf *dmabuf;
16639 LPFC_MBOXQ_t *mbox;
16640 int rc, length, status = 0;
16641 uint32_t shdr_status, shdr_add_status;
16642 union lpfc_sli4_cfg_shdr *shdr;
16643 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16644
16645 /* sanity check on queue memory */
16646 if (!mq || !cq)
16647 return -ENODEV;
16648 if (!phba->sli4_hba.pc_sli4_params.supported)
16649 hw_page_size = SLI4_PAGE_SIZE;
16650
16651 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16652 if (!mbox)
16653 return -ENOMEM;
16654 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16655 sizeof(struct lpfc_sli4_cfg_mhdr));
16656 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16657 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16658 length, LPFC_SLI4_MBX_EMBED);
16659
16660 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16661 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16662 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16663 &mq_create_ext->u.request, mq->page_count);
16664 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16665 &mq_create_ext->u.request, 1);
16666 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16667 &mq_create_ext->u.request, 1);
16668 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16669 &mq_create_ext->u.request, 1);
16670 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16671 &mq_create_ext->u.request, 1);
16672 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16673 &mq_create_ext->u.request, 1);
16674 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16675 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16676 phba->sli4_hba.pc_sli4_params.mqv);
16677 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16678 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16679 cq->queue_id);
16680 else
16681 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16682 cq->queue_id);
16683 switch (mq->entry_count) {
16684 default:
16685 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16686 "0362 Unsupported MQ count. (%d)\n",
16687 mq->entry_count);
16688 if (mq->entry_count < 16) {
16689 status = -EINVAL;
16690 goto out;
16691 }
16692 fallthrough; /* otherwise default to smallest count */
16693 case 16:
16694 bf_set(lpfc_mq_context_ring_size,
16695 &mq_create_ext->u.request.context,
16696 LPFC_MQ_RING_SIZE_16);
16697 break;
16698 case 32:
16699 bf_set(lpfc_mq_context_ring_size,
16700 &mq_create_ext->u.request.context,
16701 LPFC_MQ_RING_SIZE_32);
16702 break;
16703 case 64:
16704 bf_set(lpfc_mq_context_ring_size,
16705 &mq_create_ext->u.request.context,
16706 LPFC_MQ_RING_SIZE_64);
16707 break;
16708 case 128:
16709 bf_set(lpfc_mq_context_ring_size,
16710 &mq_create_ext->u.request.context,
16711 LPFC_MQ_RING_SIZE_128);
16712 break;
16713 }
16714 list_for_each_entry(dmabuf, &mq->page_list, list) {
16715 memset(dmabuf->virt, 0, hw_page_size);
16716 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16717 putPaddrLow(dmabuf->phys);
16718 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16719 putPaddrHigh(dmabuf->phys);
16720 }
16721 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16722 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16723 &mq_create_ext->u.response);
16724 if (rc != MBX_SUCCESS) {
16725 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16726 "2795 MQ_CREATE_EXT failed with "
16727 "status x%x. Failback to MQ_CREATE.\n",
16728 rc);
16729 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16730 mq_create = &mbox->u.mqe.un.mq_create;
16731 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16732 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16733 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16734 &mq_create->u.response);
16735 }
16736
16737 /* The IOCTL status is embedded in the mailbox subheader. */
16738 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16739 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16740 if (shdr_status || shdr_add_status || rc) {
16741 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16742 "2502 MQ_CREATE mailbox failed with "
16743 "status x%x add_status x%x, mbx status x%x\n",
16744 shdr_status, shdr_add_status, rc);
16745 status = -ENXIO;
16746 goto out;
16747 }
16748 if (mq->queue_id == 0xFFFF) {
16749 status = -ENXIO;
16750 goto out;
16751 }
16752 mq->type = LPFC_MQ;
16753 mq->assoc_qid = cq->queue_id;
16754 mq->subtype = subtype;
16755 mq->host_index = 0;
16756 mq->hba_index = 0;
16757
16758 /* link the mq onto the parent cq child list */
16759 list_add_tail(&mq->list, &cq->child_list);
16760 out:
16761 mempool_free(mbox, phba->mbox_mem_pool);
16762 return status;
16763 }
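
/*
 * Editor's usage sketch (hypothetical, assuming the slow-path queue
 * members declared in lpfc_sli4.h): the single mailbox queue is created
 * against the slow-path mailbox CQ during SLI4 queue setup:
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
 */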
16764
16765 /**
16766 * lpfc_wq_create - Create a Work Queue on the HBA
16767 * @phba: HBA structure that indicates port to create a queue on.
16768 * @wq: The queue structure to use to create the work queue.
16769 * @cq: The completion queue to bind this work queue to.
16770 * @subtype: The subtype of the work queue indicating its functionality.
16771 *
16772 * This function creates a work queue, as detailed in @wq, on a port, described
16773 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16774 *
16775 * The @phba struct is used to send mailbox commands to the HBA. The @wq
16776 * struct is used to get the entry count and entry size that are necessary
16777 * to determine the number of pages to allocate and use for this queue. The
16778 * @cq indicates which completion queue to bind this work queue to. This
16779 * function will send the WQ_CREATE mailbox command to the HBA to set up the
16780 * work queue, and it waits synchronously for the mailbox
16781 * command to finish before continuing.
16782 *
16783 * On success this function will return a zero. If unable to allocate enough
16784 * memory this function will return -ENOMEM. If the queue create mailbox command
16785 * fails this function will return -ENXIO.
16786 **/
16787 int
16788 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16789 struct lpfc_queue *cq, uint32_t subtype)
16790 {
16791 struct lpfc_mbx_wq_create *wq_create;
16792 struct lpfc_dmabuf *dmabuf;
16793 LPFC_MBOXQ_t *mbox;
16794 int rc, length, status = 0;
16795 uint32_t shdr_status, shdr_add_status;
16796 union lpfc_sli4_cfg_shdr *shdr;
16797 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16798 struct dma_address *page;
16799 void __iomem *bar_memmap_p;
16800 uint32_t db_offset;
16801 uint16_t pci_barset;
16802 uint8_t dpp_barset;
16803 uint32_t dpp_offset;
16804 uint8_t wq_create_version;
16805 #ifdef CONFIG_X86
16806 unsigned long pg_addr;
16807 #endif
16808
16809 /* sanity check on queue memory */
16810 if (!wq || !cq)
16811 return -ENODEV;
16812 if (!phba->sli4_hba.pc_sli4_params.supported)
16813 hw_page_size = wq->page_size;
16814
16815 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16816 if (!mbox)
16817 return -ENOMEM;
16818 length = (sizeof(struct lpfc_mbx_wq_create) -
16819 sizeof(struct lpfc_sli4_cfg_mhdr));
16820 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16821 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16822 length, LPFC_SLI4_MBX_EMBED);
16823 wq_create = &mbox->u.mqe.un.wq_create;
16824 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16825 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16826 wq->page_count);
16827 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16828 cq->queue_id);
16829
16830 /* wqv is the earliest version supported, NOT the latest */
16831 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16832 phba->sli4_hba.pc_sli4_params.wqv);
16833
16834 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16835 (wq->page_size > SLI4_PAGE_SIZE))
16836 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16837 else
16838 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16839
16840 switch (wq_create_version) {
16841 case LPFC_Q_CREATE_VERSION_1:
16842 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16843 wq->entry_count);
16844 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16845 LPFC_Q_CREATE_VERSION_1);
16846
16847 switch (wq->entry_size) {
16848 default:
16849 case 64:
16850 bf_set(lpfc_mbx_wq_create_wqe_size,
16851 &wq_create->u.request_1,
16852 LPFC_WQ_WQE_SIZE_64);
16853 break;
16854 case 128:
16855 bf_set(lpfc_mbx_wq_create_wqe_size,
16856 &wq_create->u.request_1,
16857 LPFC_WQ_WQE_SIZE_128);
16858 break;
16859 }
16860 /* Request DPP by default */
16861 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16862 bf_set(lpfc_mbx_wq_create_page_size,
16863 &wq_create->u.request_1,
16864 (wq->page_size / SLI4_PAGE_SIZE));
16865 page = wq_create->u.request_1.page;
16866 break;
16867 default:
16868 page = wq_create->u.request.page;
16869 break;
16870 }
16871
16872 list_for_each_entry(dmabuf, &wq->page_list, list) {
16873 memset(dmabuf->virt, 0, hw_page_size);
16874 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16875 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16876 }
16877
16878 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16879 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16880
16881 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16882 /* The IOCTL status is embedded in the mailbox subheader. */
16883 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16884 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16885 if (shdr_status || shdr_add_status || rc) {
16886 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16887 "2503 WQ_CREATE mailbox failed with "
16888 "status x%x add_status x%x, mbx status x%x\n",
16889 shdr_status, shdr_add_status, rc);
16890 status = -ENXIO;
16891 goto out;
16892 }
16893
16894 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16895 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16896 &wq_create->u.response);
16897 else
16898 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16899 &wq_create->u.response_1);
16900
16901 if (wq->queue_id == 0xFFFF) {
16902 status = -ENXIO;
16903 goto out;
16904 }
16905
16906 wq->db_format = LPFC_DB_LIST_FORMAT;
16907 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16908 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16909 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16910 &wq_create->u.response);
16911 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16912 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16914 "3265 WQ[%d] doorbell format "
16915 "not supported: x%x\n",
16916 wq->queue_id, wq->db_format);
16917 status = -EINVAL;
16918 goto out;
16919 }
16920 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16921 &wq_create->u.response);
16922 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16923 pci_barset);
16924 if (!bar_memmap_p) {
16925 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16926 "3263 WQ[%d] failed to memmap "
16927 "pci barset:x%x\n",
16928 wq->queue_id, pci_barset);
16929 status = -ENOMEM;
16930 goto out;
16931 }
16932 db_offset = wq_create->u.response.doorbell_offset;
16933 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16934 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16936 "3252 WQ[%d] doorbell offset "
16937 "not supported: x%x\n",
16938 wq->queue_id, db_offset);
16939 status = -EINVAL;
16940 goto out;
16941 }
16942 wq->db_regaddr = bar_memmap_p + db_offset;
16943 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16944 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16945 "format:x%x\n", wq->queue_id,
16946 pci_barset, db_offset, wq->db_format);
16947 } else
16948 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16949 } else {
16950 /* Check if DPP was honored by the firmware */
16951 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16952 &wq_create->u.response_1);
16953 if (wq->dpp_enable) {
16954 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16955 &wq_create->u.response_1);
16956 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16957 pci_barset);
16958 if (!bar_memmap_p) {
16959 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16960 "3267 WQ[%d] failed to memmap "
16961 "pci barset:x%x\n",
16962 wq->queue_id, pci_barset);
16963 status = -ENOMEM;
16964 goto out;
16965 }
16966 db_offset = wq_create->u.response_1.doorbell_offset;
16967 wq->db_regaddr = bar_memmap_p + db_offset;
16968 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16969 &wq_create->u.response_1);
16970 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16971 &wq_create->u.response_1);
16972 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16973 dpp_barset);
16974 if (!bar_memmap_p) {
16975 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16976 "3268 WQ[%d] failed to memmap "
16977 "pci barset:x%x\n",
16978 wq->queue_id, dpp_barset);
16979 status = -ENOMEM;
16980 goto out;
16981 }
16982 dpp_offset = wq_create->u.response_1.dpp_offset;
16983 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16984 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16985 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16986 "dpp_id:x%x dpp_barset:x%x "
16987 "dpp_offset:x%x\n",
16988 wq->queue_id, pci_barset, db_offset,
16989 wq->dpp_id, dpp_barset, dpp_offset);
16990
16991 #ifdef CONFIG_X86
16992 /* Enable combined writes for DPP aperture */
16993 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16994 rc = set_memory_wc(pg_addr, 1);
16995 if (rc) {
16996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16997 "3272 Cannot setup Combined "
16998 "Write on WQ[%d] - disable DPP\n",
16999 wq->queue_id);
17000 phba->cfg_enable_dpp = 0;
17001 }
17002 #else
17003 phba->cfg_enable_dpp = 0;
17004 #endif
17005 } else
17006 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17007 }
17008 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17009 if (wq->pring == NULL) {
17010 status = -ENOMEM;
17011 goto out;
17012 }
17013 wq->type = LPFC_WQ;
17014 wq->assoc_qid = cq->queue_id;
17015 wq->subtype = subtype;
17016 wq->host_index = 0;
17017 wq->hba_index = 0;
17018 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17019
17020 /* link the wq onto the parent cq child list */
17021 list_add_tail(&wq->list, &cq->child_list);
17022 out:
17023 mempool_free(mbox, phba->mbox_mem_pool);
17024 return status;
17025 }
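
/*
 * Editor's usage sketch (hypothetical, assuming the ELS queue members
 * declared in lpfc_sli4.h): an ELS work queue bound to the ELS completion
 * queue:
 *
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);
 *
 * When the DPP request is honored, WQEs may be pushed through
 * wq->dpp_regaddr; otherwise the driver rings wq->db_regaddr.
 */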
17026
17027 /**
17028 * lpfc_rq_create - Create a Receive Queue on the HBA
17029 * @phba: HBA structure that indicates port to create a queue on.
17030 * @hrq: The queue structure to use to create the header receive queue.
17031 * @drq: The queue structure to use to create the data receive queue.
17032 * @cq: The completion queue to bind this receive queue pair to.
17033 * @subtype: The subtype of the receive queue indicating its functionality.
17034 *
17035 * This function creates a receive buffer queue pair, as detailed in @hrq and
17036 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
17037 * to the HBA.
17038 *
17039 * The @phba struct is used to send mailbox commands to the HBA. The @drq and
17040 * @hrq structs are used to get the entry count that is necessary to determine
17041 * the number of pages to use for each queue. The @cq indicates which
17042 * completion queue the buffers posted to these queues will complete on.
17043 * This function will send the RQ_CREATE mailbox command to the HBA to set up
17044 * the receive queue pair, and it waits synchronously for the
17045 * mailbox command to finish before continuing.
17046 *
17047 * On success this function will return a zero. If unable to allocate enough
17048 * memory this function will return -ENOMEM. If the queue create mailbox command
17049 * fails this function will return -ENXIO.
17050 **/
17051 int
17052 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17053 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17054 {
17055 struct lpfc_mbx_rq_create *rq_create;
17056 struct lpfc_dmabuf *dmabuf;
17057 LPFC_MBOXQ_t *mbox;
17058 int rc, length, status = 0;
17059 uint32_t shdr_status, shdr_add_status;
17060 union lpfc_sli4_cfg_shdr *shdr;
17061 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17062 void __iomem *bar_memmap_p;
17063 uint32_t db_offset;
17064 uint16_t pci_barset;
17065
17066 /* sanity check on queue memory */
17067 if (!hrq || !drq || !cq)
17068 return -ENODEV;
17069 if (!phba->sli4_hba.pc_sli4_params.supported)
17070 hw_page_size = SLI4_PAGE_SIZE;
17071
17072 if (hrq->entry_count != drq->entry_count)
17073 return -EINVAL;
17074 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17075 if (!mbox)
17076 return -ENOMEM;
17077 length = (sizeof(struct lpfc_mbx_rq_create) -
17078 sizeof(struct lpfc_sli4_cfg_mhdr));
17079 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17080 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17081 length, LPFC_SLI4_MBX_EMBED);
17082 rq_create = &mbox->u.mqe.un.rq_create;
17083 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17084 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17085 phba->sli4_hba.pc_sli4_params.rqv);
17086 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17087 bf_set(lpfc_rq_context_rqe_count_1,
17088 &rq_create->u.request.context,
17089 hrq->entry_count);
17090 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17091 bf_set(lpfc_rq_context_rqe_size,
17092 &rq_create->u.request.context,
17093 LPFC_RQE_SIZE_8);
17094 bf_set(lpfc_rq_context_page_size,
17095 &rq_create->u.request.context,
17096 LPFC_RQ_PAGE_SIZE_4096);
17097 } else {
17098 switch (hrq->entry_count) {
17099 default:
17100 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17101 "2535 Unsupported RQ count. (%d)\n",
17102 hrq->entry_count);
17103 if (hrq->entry_count < 512) {
17104 status = -EINVAL;
17105 goto out;
17106 }
17107 fallthrough; /* otherwise default to smallest count */
17108 case 512:
17109 bf_set(lpfc_rq_context_rqe_count,
17110 &rq_create->u.request.context,
17111 LPFC_RQ_RING_SIZE_512);
17112 break;
17113 case 1024:
17114 bf_set(lpfc_rq_context_rqe_count,
17115 &rq_create->u.request.context,
17116 LPFC_RQ_RING_SIZE_1024);
17117 break;
17118 case 2048:
17119 bf_set(lpfc_rq_context_rqe_count,
17120 &rq_create->u.request.context,
17121 LPFC_RQ_RING_SIZE_2048);
17122 break;
17123 case 4096:
17124 bf_set(lpfc_rq_context_rqe_count,
17125 &rq_create->u.request.context,
17126 LPFC_RQ_RING_SIZE_4096);
17127 break;
17128 }
17129 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17130 LPFC_HDR_BUF_SIZE);
17131 }
17132 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17133 cq->queue_id);
17134 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17135 hrq->page_count);
17136 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17137 memset(dmabuf->virt, 0, hw_page_size);
17138 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17139 putPaddrLow(dmabuf->phys);
17140 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17141 putPaddrHigh(dmabuf->phys);
17142 }
17143 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17144 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17145
17146 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17147 /* The IOCTL status is embedded in the mailbox subheader. */
17148 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17149 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17150 if (shdr_status || shdr_add_status || rc) {
17151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17152 "2504 RQ_CREATE mailbox failed with "
17153 "status x%x add_status x%x, mbx status x%x\n",
17154 shdr_status, shdr_add_status, rc);
17155 status = -ENXIO;
17156 goto out;
17157 }
17158 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17159 if (hrq->queue_id == 0xFFFF) {
17160 status = -ENXIO;
17161 goto out;
17162 }
17163
17164 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17165 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17166 &rq_create->u.response);
17167 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17168 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17169 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17170 "3262 RQ [%d] doorbell format not "
17171 "supported: x%x\n", hrq->queue_id,
17172 hrq->db_format);
17173 status = -EINVAL;
17174 goto out;
17175 }
17176
17177 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17178 &rq_create->u.response);
17179 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17180 if (!bar_memmap_p) {
17181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17182 "3269 RQ[%d] failed to memmap pci "
17183 "barset:x%x\n", hrq->queue_id,
17184 pci_barset);
17185 status = -ENOMEM;
17186 goto out;
17187 }
17188
17189 db_offset = rq_create->u.response.doorbell_offset;
17190 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17191 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17192 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17193 "3270 RQ[%d] doorbell offset not "
17194 "supported: x%x\n", hrq->queue_id,
17195 db_offset);
17196 status = -EINVAL;
17197 goto out;
17198 }
17199 hrq->db_regaddr = bar_memmap_p + db_offset;
17200 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17201 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17202 "format:x%x\n", hrq->queue_id, pci_barset,
17203 db_offset, hrq->db_format);
17204 } else {
17205 hrq->db_format = LPFC_DB_RING_FORMAT;
17206 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17207 }
17208 hrq->type = LPFC_HRQ;
17209 hrq->assoc_qid = cq->queue_id;
17210 hrq->subtype = subtype;
17211 hrq->host_index = 0;
17212 hrq->hba_index = 0;
17213 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17214
17215 /* now create the data queue */
17216 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17217 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17218 length, LPFC_SLI4_MBX_EMBED);
17219 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17220 phba->sli4_hba.pc_sli4_params.rqv);
17221 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17222 bf_set(lpfc_rq_context_rqe_count_1,
17223 &rq_create->u.request.context, hrq->entry_count);
17224 if (subtype == LPFC_NVMET)
17225 rq_create->u.request.context.buffer_size =
17226 LPFC_NVMET_DATA_BUF_SIZE;
17227 else
17228 rq_create->u.request.context.buffer_size =
17229 LPFC_DATA_BUF_SIZE;
17230 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17231 LPFC_RQE_SIZE_8);
17232 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17233 (PAGE_SIZE/SLI4_PAGE_SIZE));
17234 } else {
17235 switch (drq->entry_count) {
17236 default:
17237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17238 "2536 Unsupported RQ count. (%d)\n",
17239 drq->entry_count);
17240 if (drq->entry_count < 512) {
17241 status = -EINVAL;
17242 goto out;
17243 }
17244 fallthrough; /* otherwise default to smallest count */
17245 case 512:
17246 bf_set(lpfc_rq_context_rqe_count,
17247 &rq_create->u.request.context,
17248 LPFC_RQ_RING_SIZE_512);
17249 break;
17250 case 1024:
17251 bf_set(lpfc_rq_context_rqe_count,
17252 &rq_create->u.request.context,
17253 LPFC_RQ_RING_SIZE_1024);
17254 break;
17255 case 2048:
17256 bf_set(lpfc_rq_context_rqe_count,
17257 &rq_create->u.request.context,
17258 LPFC_RQ_RING_SIZE_2048);
17259 break;
17260 case 4096:
17261 bf_set(lpfc_rq_context_rqe_count,
17262 &rq_create->u.request.context,
17263 LPFC_RQ_RING_SIZE_4096);
17264 break;
17265 }
17266 if (subtype == LPFC_NVMET)
17267 bf_set(lpfc_rq_context_buf_size,
17268 &rq_create->u.request.context,
17269 LPFC_NVMET_DATA_BUF_SIZE);
17270 else
17271 bf_set(lpfc_rq_context_buf_size,
17272 &rq_create->u.request.context,
17273 LPFC_DATA_BUF_SIZE);
17274 }
17275 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17276 cq->queue_id);
17277 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17278 drq->page_count);
17279 list_for_each_entry(dmabuf, &drq->page_list, list) {
17280 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17281 putPaddrLow(dmabuf->phys);
17282 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17283 putPaddrHigh(dmabuf->phys);
17284 }
17285 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17286 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17287 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17288 /* The IOCTL status is embedded in the mailbox subheader. */
17289 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17290 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17291 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17292 if (shdr_status || shdr_add_status || rc) {
17293 status = -ENXIO;
17294 goto out;
17295 }
17296 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17297 if (drq->queue_id == 0xFFFF) {
17298 status = -ENXIO;
17299 goto out;
17300 }
17301 drq->type = LPFC_DRQ;
17302 drq->assoc_qid = cq->queue_id;
17303 drq->subtype = subtype;
17304 drq->host_index = 0;
17305 drq->hba_index = 0;
17306 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17307
17308 /* link the header and data RQs onto the parent cq child list */
17309 list_add_tail(&hrq->list, &cq->child_list);
17310 list_add_tail(&drq->list, &cq->child_list);
17311
17312 out:
17313 mempool_free(mbox, phba->mbox_mem_pool);
17314 return status;
17315 }
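
/*
 * Editor's usage sketch (hypothetical, assuming the slow-path RQ members
 * declared in lpfc_sli4.h): a header/data receive pair completing on the
 * ELS CQ:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 */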
17316
17317 /**
17318 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17319 * @phba: HBA structure that indicates port to create a queue on.
17320 * @hrqp: The queue structure array to use to create the header receive queues.
17321 * @drqp: The queue structure array to use to create the data receive queues.
17322 * @cqp: The completion queue array to bind these receive queues to.
17323 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17324 *
17325 * This function creates receive buffer queue pairs, as detailed in @hrqp and
17326 * @drqp, on a port described by @phba, by sending a RQ_CREATE mailbox command
17327 * to the HBA.
17328 *
17329 * The @phba struct is used to send mailbox commands to the HBA. The @drqp and
17330 * @hrqp structs are used to get the entry count that is necessary to determine
17331 * the number of pages to use for each queue. The @cqp array indicates which
17332 * completion queue the buffers posted to each queue pair will complete on.
17333 * This function will send the RQ_CREATE mailbox command to the HBA to set up
17334 * the receive queue pairs, and it waits synchronously for the
17335 * mailbox command to finish before continuing.
17336 *
17337 * On success this function will return a zero. If unable to allocate enough
17338 * memory this function will return -ENOMEM. If the queue create mailbox command
17339 * fails this function will return -ENXIO.
17340 **/
17341 int
17342 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17343 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17344 uint32_t subtype)
17345 {
17346 struct lpfc_queue *hrq, *drq, *cq;
17347 struct lpfc_mbx_rq_create_v2 *rq_create;
17348 struct lpfc_dmabuf *dmabuf;
17349 LPFC_MBOXQ_t *mbox;
17350 int rc, length, alloclen, status = 0;
17351 int cnt, idx, numrq, page_idx = 0;
17352 uint32_t shdr_status, shdr_add_status;
17353 union lpfc_sli4_cfg_shdr *shdr;
17354 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17355
17356 numrq = phba->cfg_nvmet_mrq;
17357 /* sanity check on array memory */
17358 if (!hrqp || !drqp || !cqp || !numrq)
17359 return -ENODEV;
17360 if (!phba->sli4_hba.pc_sli4_params.supported)
17361 hw_page_size = SLI4_PAGE_SIZE;
17362
17363 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17364 if (!mbox)
17365 return -ENOMEM;
17366
17367 length = sizeof(struct lpfc_mbx_rq_create_v2);
17368 length += ((2 * numrq * hrqp[0]->page_count) *
17369 sizeof(struct dma_address));
17370
17371 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17372 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17373 LPFC_SLI4_MBX_NEMBED);
17374 if (alloclen < length) {
17375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17376 "3099 Allocated DMA memory size (%d) is "
17377 "less than the requested DMA memory size "
17378 "(%d)\n", alloclen, length);
17379 status = -ENOMEM;
17380 goto out;
17381 }
17382
17385 rq_create = mbox->sge_array->addr[0];
17386 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17387
17388 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17389 cnt = 0;
17390
17391 for (idx = 0; idx < numrq; idx++) {
17392 hrq = hrqp[idx];
17393 drq = drqp[idx];
17394 cq = cqp[idx];
17395
17396 /* sanity check on queue memory */
17397 if (!hrq || !drq || !cq) {
17398 status = -ENODEV;
17399 goto out;
17400 }
17401
17402 if (hrq->entry_count != drq->entry_count) {
17403 status = -EINVAL;
17404 goto out;
17405 }
17406
17407 if (idx == 0) {
17408 bf_set(lpfc_mbx_rq_create_num_pages,
17409 &rq_create->u.request,
17410 hrq->page_count);
17411 bf_set(lpfc_mbx_rq_create_rq_cnt,
17412 &rq_create->u.request, (numrq * 2));
17413 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17414 1);
17415 bf_set(lpfc_rq_context_base_cq,
17416 &rq_create->u.request.context,
17417 cq->queue_id);
17418 bf_set(lpfc_rq_context_data_size,
17419 &rq_create->u.request.context,
17420 LPFC_NVMET_DATA_BUF_SIZE);
17421 bf_set(lpfc_rq_context_hdr_size,
17422 &rq_create->u.request.context,
17423 LPFC_HDR_BUF_SIZE);
17424 bf_set(lpfc_rq_context_rqe_count_1,
17425 &rq_create->u.request.context,
17426 hrq->entry_count);
17427 bf_set(lpfc_rq_context_rqe_size,
17428 &rq_create->u.request.context,
17429 LPFC_RQE_SIZE_8);
17430 bf_set(lpfc_rq_context_page_size,
17431 &rq_create->u.request.context,
17432 (PAGE_SIZE/SLI4_PAGE_SIZE));
17433 }
17434 rc = 0;
17435 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17436 memset(dmabuf->virt, 0, hw_page_size);
17437 cnt = page_idx + dmabuf->buffer_tag;
17438 rq_create->u.request.page[cnt].addr_lo =
17439 putPaddrLow(dmabuf->phys);
17440 rq_create->u.request.page[cnt].addr_hi =
17441 putPaddrHigh(dmabuf->phys);
17442 rc++;
17443 }
17444 page_idx += rc;
17445
17446 rc = 0;
17447 list_for_each_entry(dmabuf, &drq->page_list, list) {
17448 memset(dmabuf->virt, 0, hw_page_size);
17449 cnt = page_idx + dmabuf->buffer_tag;
17450 rq_create->u.request.page[cnt].addr_lo =
17451 putPaddrLow(dmabuf->phys);
17452 rq_create->u.request.page[cnt].addr_hi =
17453 putPaddrHigh(dmabuf->phys);
17454 rc++;
17455 }
17456 page_idx += rc;
17457
17458 hrq->db_format = LPFC_DB_RING_FORMAT;
17459 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17460 hrq->type = LPFC_HRQ;
17461 hrq->assoc_qid = cq->queue_id;
17462 hrq->subtype = subtype;
17463 hrq->host_index = 0;
17464 hrq->hba_index = 0;
17465 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17466
17467 drq->db_format = LPFC_DB_RING_FORMAT;
17468 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17469 drq->type = LPFC_DRQ;
17470 drq->assoc_qid = cq->queue_id;
17471 drq->subtype = subtype;
17472 drq->host_index = 0;
17473 drq->hba_index = 0;
17474 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17475
17476 list_add_tail(&hrq->list, &cq->child_list);
17477 list_add_tail(&drq->list, &cq->child_list);
17478 }
17479
17480 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17481 /* The IOCTL status is embedded in the mailbox subheader. */
17482 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17483 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17484 if (shdr_status || shdr_add_status || rc) {
17485 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17486 "3120 RQ_CREATE mailbox failed with "
17487 "status x%x add_status x%x, mbx status x%x\n",
17488 shdr_status, shdr_add_status, rc);
17489 status = -ENXIO;
17490 goto out;
17491 }
17492 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17493 if (rc == 0xFFFF) {
17494 status = -ENXIO;
17495 goto out;
17496 }
17497
17498 /* Initialize all RQs with associated queue id */
17499 for (idx = 0; idx < numrq; idx++) {
17500 hrq = hrqp[idx];
17501 hrq->queue_id = rc + (2 * idx);
17502 drq = drqp[idx];
17503 drq->queue_id = rc + (2 * idx) + 1;
17504 }
17505
17506 out:
17507 lpfc_sli4_mbox_cmd_free(phba, mbox);
17508 return status;
17509 }
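
/*
 * Editor's usage sketch (hypothetical, assuming the NVMET MRQ members
 * declared in lpfc_sli4.h): all phba->cfg_nvmet_mrq header/data pairs are
 * created in one non-embedded RQ_CREATE:
 *
 *	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
 *			     phba->sli4_hba.nvmet_mrq_data,
 *			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
 *
 * Queue ids then interleave: hrqp[idx] gets base + 2 * idx and drqp[idx]
 * gets base + 2 * idx + 1, as assigned in the loop above.
 */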
17510
17511 /**
17512 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17513 * @phba: HBA structure that indicates port to destroy a queue on.
17514 * @eq: The queue structure associated with the queue to destroy.
17515 *
17516 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17517 * command, specific to the type of queue, to the HBA.
17518 *
17519 * The @eq struct is used to get the queue ID of the queue to destroy.
17520 *
17521 * On success this function will return a zero. If the queue destroy mailbox
17522 * command fails this function will return -ENXIO.
17523 **/
17524 int
17525 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17526 {
17527 LPFC_MBOXQ_t *mbox;
17528 int rc, length, status = 0;
17529 uint32_t shdr_status, shdr_add_status;
17530 union lpfc_sli4_cfg_shdr *shdr;
17531
17532 /* sanity check on queue memory */
17533 if (!eq)
17534 return -ENODEV;
17535
17536 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17537 if (!mbox)
17538 return -ENOMEM;
17539 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17540 sizeof(struct lpfc_sli4_cfg_mhdr));
17541 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17542 LPFC_MBOX_OPCODE_EQ_DESTROY,
17543 length, LPFC_SLI4_MBX_EMBED);
17544 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17545 eq->queue_id);
17546 mbox->vport = eq->phba->pport;
17547 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17548
17549 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17550 /* The IOCTL status is embedded in the mailbox subheader. */
17551 shdr = (union lpfc_sli4_cfg_shdr *)
17552 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17553 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17554 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17555 if (shdr_status || shdr_add_status || rc) {
17556 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17557 "2505 EQ_DESTROY mailbox failed with "
17558 "status x%x add_status x%x, mbx status x%x\n",
17559 shdr_status, shdr_add_status, rc);
17560 status = -ENXIO;
17561 }
17562
17563 /* Remove eq from any list */
17564 list_del_init(&eq->list);
17565 mempool_free(mbox, eq->phba->mbox_mem_pool);
17566 return status;
17567 }
17568
17569 /**
17570 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17571 * @phba: HBA structure that indicates port to destroy a queue on.
17572 * @cq: The queue structure associated with the queue to destroy.
17573 *
17574 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17575 * command, specific to the type of queue, to the HBA.
17576 *
17577 * The @cq struct is used to get the queue ID of the queue to destroy.
17578 *
17579 * On success this function will return a zero. If the queue destroy mailbox
17580 * command fails this function will return -ENXIO.
17581 **/
17582 int
17583 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17584 {
17585 LPFC_MBOXQ_t *mbox;
17586 int rc, length, status = 0;
17587 uint32_t shdr_status, shdr_add_status;
17588 union lpfc_sli4_cfg_shdr *shdr;
17589
17590 /* sanity check on queue memory */
17591 if (!cq)
17592 return -ENODEV;
17593 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17594 if (!mbox)
17595 return -ENOMEM;
17596 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17597 sizeof(struct lpfc_sli4_cfg_mhdr));
17598 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17599 LPFC_MBOX_OPCODE_CQ_DESTROY,
17600 length, LPFC_SLI4_MBX_EMBED);
17601 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17602 cq->queue_id);
17603 mbox->vport = cq->phba->pport;
17604 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17605 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17606 /* The IOCTL status is embedded in the mailbox subheader. */
17607 shdr = (union lpfc_sli4_cfg_shdr *)
17608 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17609 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17610 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17611 if (shdr_status || shdr_add_status || rc) {
17612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17613 "2506 CQ_DESTROY mailbox failed with "
17614 "status x%x add_status x%x, mbx status x%x\n",
17615 shdr_status, shdr_add_status, rc);
17616 status = -ENXIO;
17617 }
17618 /* Remove cq from any list */
17619 list_del_init(&cq->list);
17620 mempool_free(mbox, cq->phba->mbox_mem_pool);
17621 return status;
17622 }
17623
17624 /**
17625 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17626 * @phba: HBA structure that indicates port to destroy a queue on.
17627 * @mq: The queue structure associated with the queue to destroy.
17628 *
17629 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17630 * command, specific to the type of queue, to the HBA.
17631 *
17632 * The @mq struct is used to get the queue ID of the queue to destroy.
17633 *
17634 * On success this function will return a zero. If the queue destroy mailbox
17635 * command fails this function will return -ENXIO.
17636 **/
17637 int
17638 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17639 {
17640 LPFC_MBOXQ_t *mbox;
17641 int rc, length, status = 0;
17642 uint32_t shdr_status, shdr_add_status;
17643 union lpfc_sli4_cfg_shdr *shdr;
17644
17645 /* sanity check on queue memory */
17646 if (!mq)
17647 return -ENODEV;
17648 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17649 if (!mbox)
17650 return -ENOMEM;
17651 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17652 sizeof(struct lpfc_sli4_cfg_mhdr));
17653 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17654 LPFC_MBOX_OPCODE_MQ_DESTROY,
17655 length, LPFC_SLI4_MBX_EMBED);
17656 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17657 mq->queue_id);
17658 mbox->vport = mq->phba->pport;
17659 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17660 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17661 /* The IOCTL status is embedded in the mailbox subheader. */
17662 shdr = (union lpfc_sli4_cfg_shdr *)
17663 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17664 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17665 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17666 if (shdr_status || shdr_add_status || rc) {
17667 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17668 "2507 MQ_DESTROY mailbox failed with "
17669 "status x%x add_status x%x, mbx status x%x\n",
17670 shdr_status, shdr_add_status, rc);
17671 status = -ENXIO;
17672 }
17673 /* Remove mq from any list */
17674 list_del_init(&mq->list);
17675 mempool_free(mbox, mq->phba->mbox_mem_pool);
17676 return status;
17677 }
17678
17679 /**
17680 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17681 * @phba: HBA structure that indicates port to destroy a queue on.
17682 * @wq: The queue structure associated with the queue to destroy.
17683 *
17684 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17685 * command, specific to the type of queue, to the HBA.
17686 *
17687 * The @wq struct is used to get the queue ID of the queue to destroy.
17688 *
17689 * On success this function will return a zero. If the queue destroy mailbox
17690 * command fails this function will return -ENXIO.
17691 **/
17692 int
17693 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17694 {
17695 LPFC_MBOXQ_t *mbox;
17696 int rc, length, status = 0;
17697 uint32_t shdr_status, shdr_add_status;
17698 union lpfc_sli4_cfg_shdr *shdr;
17699
17700 /* sanity check on queue memory */
17701 if (!wq)
17702 return -ENODEV;
17703 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17704 if (!mbox)
17705 return -ENOMEM;
17706 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17707 sizeof(struct lpfc_sli4_cfg_mhdr));
17708 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17709 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17710 length, LPFC_SLI4_MBX_EMBED);
17711 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17712 wq->queue_id);
17713 mbox->vport = wq->phba->pport;
17714 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17715 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17716 shdr = (union lpfc_sli4_cfg_shdr *)
17717 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17718 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17719 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17720 if (shdr_status || shdr_add_status || rc) {
17721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17722 "2508 WQ_DESTROY mailbox failed with "
17723 "status x%x add_status x%x, mbx status x%x\n",
17724 shdr_status, shdr_add_status, rc);
17725 status = -ENXIO;
17726 }
17727 /* Remove wq from any list */
17728 list_del_init(&wq->list);
17729 kfree(wq->pring);
17730 wq->pring = NULL;
17731 mempool_free(mbox, wq->phba->mbox_mem_pool);
17732 return status;
17733 }
17734
17735 /**
17736 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17737 * @phba: HBA structure that indicates port to destroy a queue on.
17738 * @hrq: The header receive queue structure to destroy.
17739 * @drq: The data receive queue structure to destroy.
17740 *
17741 * This function destroys a receive queue pair, as detailed in @hrq and
17742 * @drq, by sending a mailbox command, specific to the type of queue, to the HBA.
17743 *
17744 * The @hrq and @drq structs are used to get the queue IDs to destroy.
17745 *
17746 * On success this function will return a zero. If the queue destroy mailbox
17747 * command fails this function will return -ENXIO.
17748 **/
17749 int
17750 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17751 struct lpfc_queue *drq)
17752 {
17753 LPFC_MBOXQ_t *mbox;
17754 int rc, length, status = 0;
17755 uint32_t shdr_status, shdr_add_status;
17756 union lpfc_sli4_cfg_shdr *shdr;
17757
17758 /* sanity check on queue memory */
17759 if (!hrq || !drq)
17760 return -ENODEV;
17761 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17762 if (!mbox)
17763 return -ENOMEM;
17764 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17765 sizeof(struct lpfc_sli4_cfg_mhdr));
17766 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17767 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17768 length, LPFC_SLI4_MBX_EMBED);
17769 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17770 hrq->queue_id);
17771 mbox->vport = hrq->phba->pport;
17772 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17773 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17774 /* The IOCTL status is embedded in the mailbox subheader. */
17775 shdr = (union lpfc_sli4_cfg_shdr *)
17776 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17777 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17778 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17779 if (shdr_status || shdr_add_status || rc) {
17780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17781 "2509 RQ_DESTROY mailbox failed with "
17782 "status x%x add_status x%x, mbx status x%x\n",
17783 shdr_status, shdr_add_status, rc);
17784 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17785 return -ENXIO;
17786 }
17787 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17788 drq->queue_id);
17789 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17790 shdr = (union lpfc_sli4_cfg_shdr *)
17791 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17792 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17793 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17794 if (shdr_status || shdr_add_status || rc) {
17795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17796 "2510 RQ_DESTROY mailbox failed with "
17797 "status x%x add_status x%x, mbx status x%x\n",
17798 shdr_status, shdr_add_status, rc);
17799 status = -ENXIO;
17800 }
17801 list_del_init(&hrq->list);
17802 list_del_init(&drq->list);
17803 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17804 return status;
17805 }

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: The HBA for which this call is being executed.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGLs are mapped then the first one must have 256 entries and
 * the second sgl can have between 1 and 256 entries.
 *
 * Return codes:
 * 	0 - Success
 * 	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		mempool_free(mbox, phba->mbox_mem_pool);
	else if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
}

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available xri from the
 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
 * xri index is logical, so the search starts at 0 each time.
 *
 * Returns
 *	An available xri in the range 0 <= xri < max_xri if successful
 *	NO_XRI if no xris are available.
 **/
static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
				  phba->sli4_hba.max_cfg_param.max_xri);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
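
/*
 * Illustrative sketch (not part of the driver) of the bitmap-allocator
 * pairing used above: allocation finds and sets the first clear bit
 * under hbalock, and the matching free path simply clears it again.
 *
 *	xri = lpfc_sli4_alloc_xri(phba);	set_bit + xri_used++
 *	...
 *	lpfc_sli4_free_xri(phba, xri);		clear_bit + xri_used--
 *
 * Because the index is logical, the same xri value can be handed out
 * again immediately after it is freed.
 */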

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff); otherwise it returns the allocated
 * xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}
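
/*
 * Illustrative caller pattern (not part of the driver): a new exchange
 * must check the NO_XRI sentinel before using the tag.
 *
 *	uint16_t xritag = lpfc_sli4_next_xritag(phba);
 *
 *	if (xritag == NO_XRI)
 *		return -ENOMEM;		(hypothetical error handling)
 *	iocbq->sli4_xritag = xritag;
 */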

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	else if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
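
/*
 * Sizing note (illustrative, assuming a 4KB SLI4_PAGE_SIZE and a
 * 16-byte sgl_page_pairs): each posted entry consumes one page-pair
 * descriptor, plus the config shdr and one trailing word, so roughly
 * (4096 - sizeof(union lpfc_sli4_cfg_shdr) - 4) / 16 entries fit in a
 * single non-embedded page.  Callers cap post_cnt accordingly before
 * invoking the block-post routines.
 */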

/**
 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme sgl pages from an
 * NVME buffer list @nblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 *
 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (!phba->sli4_hba.intr_enable)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	else if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
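
/*
 * Addressing note (illustrative): every page pair above splits a
 * 64-bit DMA address into two little-endian 32-bit words with
 * putPaddrLow()/putPaddrHigh(), e.g. for a hypothetical address
 * 0x0000001234567000:
 *
 *	sgl_pg0_addr_lo = cpu_to_le32(0x34567000);
 *	sgl_pg0_addr_hi = cpu_to_le32(0x00000012);
 *
 * A second page is described only when cfg_sg_dma_buf_size exceeds
 * SGL_PAGE_SIZE; otherwise the pg1 words are posted as zero.
 */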

/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_nblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error.  Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgls on the NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error.  Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success, Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
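
/*
 * Illustrative walkthrough (not part of the driver) of the blocking
 * logic above, assuming buffers arrive with xris 10, 11, 12, 20, 21:
 * the first three form one contiguous run; the jump from 12 to 20 is
 * "a hole in the xri block", so 10-12 are spliced onto blck_nblist and
 * posted as one non-embedded block while 20 starts a new run.  A run
 * is also flushed early once it reaches LPFC_NEMBED_MBOX_SGL_CNT
 * entries, and a trailing run of exactly one buffer falls back to the
 * embedded lpfc_sli4_post_sgl() path.
 */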

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/* make rctl_names static to save stack space */
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

#define FC_RCTL_MDS_DIAGS	0xF4

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_MDS_DIAGS:		/* MDS Diagnostics */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	default:
		goto drop;
	}

	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
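
/*
 * Illustrative note (not part of the driver): a frame tagged with
 * R_CTL FC_RCTL_VFTH carries a Virtual Fabric Tagging Header in front
 * of the real FC header, so the check above steps past exactly one
 * header's worth of bytes and recurses once on the encapsulated
 * header:
 *
 *	fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
 *	return lpfc_fc_frame_check(phba, fc_hdr);
 *
 * The [1] indexing is pointer arithmetic that skips one
 * fc_frame_header-sized region, the size a VFT header occupies here.
 */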

/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VF header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID to match against
 *
 * This function searches the @phba for a vport that matches the content of
 * the @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to
 * fetch the VFI, if the Virtual Fabric Tagging Header exists, and the DID.
 * This function returns the matching vport pointer or NULL if unable to match
 * the frame to a vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
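
/*
 * Timeout math note (illustrative): with fc_edtov = 2000ms and HZ = 250,
 * msecs_to_jiffies(2000) is 500 jiffies, so a sequence whose time_stamp
 * was taken at jiffy J expires once jiffies passes J + 500.  Because
 * the list is kept ordered from oldest to youngest, the scan above can
 * stop at the first sequence that has not yet expired.
 */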

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
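
/*
 * Matching note (illustrative): frames are bound to a pending sequence
 * by the 3-tuple (fh_seq_id, fh_ox_id, fh_s_id); the 3-byte memcmp on
 * fh_s_id covers the 24-bit source FC address.  Within a sequence the
 * frames are kept sorted by ascending fh_seq_cnt, which is why the
 * insert walks backwards from the tail: a newly arrived frame is
 * usually the youngest.
 */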

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described by
 * the information from the basic abort @dmabuf. It checks to see whether such
 * a partially assembled sequence is held by the driver. If so, it shall free
 * up all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the sequence that has been assembled and
 * handed to the upper level protocol, described by the information from the
 * basic abort @dmabuf. It checks to see whether such a pending context
 * exists at the upper level protocol. If so, it shall clean up the pending
 * context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence cleaned
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq) {
		lpfc_nlp_put(cmd_iocbq->ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
				get_job_ulpstatus(phba, rsp_iocbq),
				get_job_word4(phba, rsp_iocbq));
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates the xri maps to the known range of XRIs allocated
 * and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: was the partially assembled receive sequence successfully aborted
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	union lpfc_wqe128 *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	icmd = &ctiocb->wqe;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	ctiocb->ndlp = lpfc_nlp_get(ndlp);
	if (!ctiocb->ndlp) {
		lpfc_sli_release_iocbq(phba, ctiocb);
		return;
	}

	ctiocb->vport = phba->pport;
	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;
	ctiocb->abort_rctl = FC_RCTL_BA_ACC;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_INV_XID);
		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_INV_XID);
		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
		       FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
	}

	/* OX_ID is invariable to who sent ABTS to CT exchange */
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid);

	/* Use CT=VPI */
	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
	       ndlp->nlp_DID);
	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 ctiocb->abort_rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 ctiocb->abort_rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->ndlp = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
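
/*
 * Response selection note (illustrative): the routine above defaults
 * to a BA_ACC and downgrades it to a BA_RJT (reason FC_BA_RJT_UNABLE,
 * explanation FC_BA_RJT_INV_XID) in two cases: the responder-owned
 * oxid no longer maps into the driver's XRI range, or the partial
 * sequence could not be aborted (@aborted == false).  Either way the
 * BLS response is still transmitted; only the R_CTL value and the
 * reject fields in the WQE differ.
 */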

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
 * indicate that the unsolicited sequence has been aborted. After that, it
 * will issue a basic accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
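
/*
 * F_CTL note (illustrative): fh_f_ctl is a 3-byte big-endian field, so
 * the code above rebuilds it as
 *
 *	fctl = (fh_f_ctl[0] << 16) | (fh_f_ctl[1] << 8) | fh_f_ctl[2];
 *
 * e.g. bytes 0x98 0x00 0x00 yield 0x980000, in which the
 * FC_FC_END_SEQ bit (1 << 20, i.e. 0x100000 in fc_fs.h) is set,
 * marking the last frame of the sequence.
 */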

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->wcqe_cmpl.total_data_placed = 0;
		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
		       IOSTAT_SUCCESS);
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
			       sli4_did_from_fc_hdr(fc_hdr));
		}

		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
		       NO_XRI);
		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
		       be16_to_cpu(fc_hdr->fh_ox_id));

		/* put the first buffer into the first iocb */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
		first_iocbq->bpl_dmabuf = NULL;
		/* Keep track of the BDE count */
		first_iocbq->wcqe_cmpl.word3 = 1;

		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;

		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
		       sid);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->bpl_dmabuf) {
			iocbq->bpl_dmabuf = d_buf;
			iocbq->wcqe_cmpl.word3++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->unsol_rcv_len = len;
			iocbq->wcqe_cmpl.total_data_placed += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					bf_set(lpfc_wcqe_c_status,
					       &first_iocbq->wcqe_cmpl,
					       IOSTAT_SUCCESS);
					first_iocbq->wcqe_cmpl.parameter =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->cmd_dmabuf = d_buf;
			iocbq->bpl_dmabuf = NULL;
			iocbq->wcqe_cmpl.word3 = 1;

			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
					len;

			tot_len += len;
			iocbq->wcqe_cmpl.total_data_placed = tot_len;
			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
			       sid);
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	/* Free the sequence's header buffer */
	if (!first_iocbq)
		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);

	return first_iocbq;
}

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
	}

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
19217
19218 static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocb,struct lpfc_iocbq * rspiocb)19219 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19220 struct lpfc_iocbq *rspiocb)
19221 {
19222 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19223
19224 if (pcmd && pcmd->virt)
19225 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19226 kfree(pcmd);
19227 lpfc_sli_release_iocbq(phba, cmdiocb);
19228 lpfc_drain_txq(phba);
19229 }
19230
19231 static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport * vport,struct hbq_dmabuf * dmabuf)19232 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19233 struct hbq_dmabuf *dmabuf)
19234 {
19235 struct fc_frame_header *fc_hdr;
19236 struct lpfc_hba *phba = vport->phba;
19237 struct lpfc_iocbq *iocbq = NULL;
19238 union lpfc_wqe128 *pwqe;
19239 struct lpfc_dmabuf *pcmd = NULL;
19240 uint32_t frame_len;
19241 int rc;
19242 unsigned long iflags;
19243
19244 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19245 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19246
19247 /* Send the received frame back */
19248 iocbq = lpfc_sli_get_iocbq(phba);
19249 if (!iocbq) {
19250 /* Queue cq event and wakeup worker thread to process it */
19251 spin_lock_irqsave(&phba->hbalock, iflags);
19252 list_add_tail(&dmabuf->cq_event.list,
19253 &phba->sli4_hba.sp_queue_event);
19254 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19255 spin_unlock_irqrestore(&phba->hbalock, iflags);
19256 lpfc_worker_wake_up(phba);
19257 return;
19258 }
19259
19260 /* Allocate buffer for command payload */
19261 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19262 if (pcmd)
19263 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19264 &pcmd->phys);
19265 if (!pcmd || !pcmd->virt)
19266 goto exit;
19267
19268 INIT_LIST_HEAD(&pcmd->list);
19269
19270 /* copy in the payload */
19271 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19272
19273 iocbq->cmd_dmabuf = pcmd;
19274 iocbq->vport = vport;
19275 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19276 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19277 iocbq->num_bdes = 0;
19278
19279 pwqe = &iocbq->wqe;
19280 /* fill in BDE's for command */
19281 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19282 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19283 pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19284 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19285
19286 pwqe->send_frame.frame_len = frame_len;
19287 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19288 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19289 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19290 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19291 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19292 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19293
19294 pwqe->generic.wqe_com.word7 = 0;
19295 pwqe->generic.wqe_com.word10 = 0;
19296
19297 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19298 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19299 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19300 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19301 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19302 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19303 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19304 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19305 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19306 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19307 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19308 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19309 pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19310
19311 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19312
19313 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19314 if (rc == IOCB_ERROR)
19315 goto exit;
19316
19317 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19318 return;
19319
19320 exit:
19321 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19322 "2023 Unable to process MDS loopback frame\n");
19323 if (pcmd && pcmd->virt)
19324 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19325 kfree(pcmd);
19326 if (iocbq)
19327 lpfc_sli_release_iocbq(phba, iocbq);
19328 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19329 }
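
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): it
 * models only the header repacking step above, where the 24-byte big-endian
 * FC frame header is loaded into the six CPU-order words fc_hdr_wd0..wd5 of
 * the SEND_FRAME WQE. The helper name and sample bytes are local to the
 * example, not driver API.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t be32_load(const uint8_t *p)
{
	/* Assemble a CPU-order word from four big-endian bytes. */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	uint8_t fc_hdr[24] = { 0x22, 0xff, 0xff, 0xfd };	/* sample header */
	uint32_t wd[6];
	int i;

	for (i = 0; i < 6; i++)
		wd[i] = be32_load(fc_hdr + 4 * i);
	printf("wd0 = 0x%08x\n", wd[0]);	/* 0x22fffffd */
	return 0;
}
#endif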
19330
19331 /**
19332 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19333 * @phba: Pointer to HBA context object.
19334 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19335 *
19336  * This function is called with no lock held. This function processes all
19337  * the received buffers and gives them to the upper layers when a received
19338  * buffer indicates that it is the final frame in the sequence. The interrupt
19339  * service routine processes received buffers in interrupt context. The
19340  * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19341  * appropriate receive function when the final frame in a sequence is received.
19342 **/
19343 void
19344 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19345 struct hbq_dmabuf *dmabuf)
19346 {
19347 struct hbq_dmabuf *seq_dmabuf;
19348 struct fc_frame_header *fc_hdr;
19349 struct lpfc_vport *vport;
19350 uint32_t fcfi;
19351 uint32_t did;
19352
19353 /* Process each received buffer */
19354 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19355
19356 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19357 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19358 vport = phba->pport;
19359 /* Handle MDS Loopback frames */
19360 if (!(phba->pport->load_flag & FC_UNLOADING))
19361 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19362 else
19363 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19364 return;
19365 }
19366
19367 /* check to see if this a valid type of frame */
19368 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19369 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19370 return;
19371 }
19372
19373 if ((bf_get(lpfc_cqe_code,
19374 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19375 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19376 &dmabuf->cq_event.cqe.rcqe_cmpl);
19377 else
19378 fcfi = bf_get(lpfc_rcqe_fcf_id,
19379 &dmabuf->cq_event.cqe.rcqe_cmpl);
19380
19381 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19382 vport = phba->pport;
19383 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19384 "2023 MDS Loopback %d bytes\n",
19385 bf_get(lpfc_rcqe_length,
19386 &dmabuf->cq_event.cqe.rcqe_cmpl));
19387 /* Handle MDS Loopback frames */
19388 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19389 return;
19390 }
19391
19392 /* d_id this frame is directed to */
19393 did = sli4_did_from_fc_hdr(fc_hdr);
19394
19395 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19396 if (!vport) {
19397 /* throw out the frame */
19398 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19399 return;
19400 }
19401
19402 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19403 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19404 (did != Fabric_DID)) {
19405 /*
19406 * Throw out the frame if we are not pt2pt.
19407 * The pt2pt protocol allows for discovery frames
19408 * to be received without a registered VPI.
19409 */
19410 if (!(vport->fc_flag & FC_PT2PT) ||
19411 (phba->link_state == LPFC_HBA_READY)) {
19412 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19413 return;
19414 }
19415 }
19416
19417 /* Handle the basic abort sequence (BA_ABTS) event */
19418 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19419 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19420 return;
19421 }
19422
19423 /* Link this frame */
19424 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19425 if (!seq_dmabuf) {
19426 /* unable to add frame to vport - throw it out */
19427 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19428 return;
19429 }
19430 /* If not last frame in sequence continue processing frames. */
19431 if (!lpfc_seq_complete(seq_dmabuf))
19432 return;
19433
19434 /* Send the complete sequence to the upper layer protocol */
19435 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19436 }
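
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): it
 * models the 3-byte D_ID extraction that sli4_did_from_fc_hdr() performs on
 * the frame header above; fh_d_id is a big-endian 3-byte address field.
 * The function name below is local to the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t did_from_d_id(const uint8_t d_id[3])
{
	return ((uint32_t)d_id[0] << 16) | ((uint32_t)d_id[1] << 8) | d_id[2];
}

int main(void)
{
	uint8_t fabric[3] = { 0xff, 0xff, 0xfe };	/* Fabric_DID */

	printf("did = 0x%06x\n", did_from_d_id(fabric));	/* 0xfffffe */
	return 0;
}
#endif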
19437
19438 /**
19439 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19440 * @phba: pointer to lpfc hba data structure.
19441 *
19442 * This routine is invoked to post rpi header templates to the
19443 * HBA consistent with the SLI-4 interface spec. This routine
19444 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19445  * SLI4_PAGE_SIZE / 64 rpi context headers.
19446 *
19447  * This routine does not require any locks. Its usage is expected
19448  * to be driver load or reset recovery, when driver execution is
19449  * sequential.
19450 *
19451 * Return codes
19452 * 0 - successful
19453 * -EIO - The mailbox failed to complete successfully.
19454 * When this error occurs, the driver is not guaranteed
19455 * to have any rpi regions posted to the device and
19456 * must either attempt to repost the regions or take a
19457 * fatal error.
19458 **/
19459 int
19460 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19461 {
19462 struct lpfc_rpi_hdr *rpi_page;
19463 uint32_t rc = 0;
19464 uint16_t lrpi = 0;
19465
19466 /* SLI4 ports that support extents do not require RPI headers. */
19467 if (!phba->sli4_hba.rpi_hdrs_in_use)
19468 goto exit;
19469 if (phba->sli4_hba.extents_in_use)
19470 return -EIO;
19471
19472 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19473 /*
19474 * Assign the rpi headers a physical rpi only if the driver
19475 * has not initialized those resources. A port reset only
19476 * needs the headers posted.
19477 */
19478 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19479 LPFC_RPI_RSRC_RDY)
19480 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19481
19482 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19483 if (rc != MBX_SUCCESS) {
19484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19485 "2008 Error %d posting all rpi "
19486 "headers\n", rc);
19487 rc = -EIO;
19488 break;
19489 }
19490 }
19491
19492 exit:
19493 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19494 LPFC_RPI_RSRC_RDY);
19495 return rc;
19496 }
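
/*
 * Worked example of the sizing noted above (kept out of the build with
 * #if 0), assuming a 4096-byte SLI4_PAGE_SIZE and 64-byte rpi context
 * headers: one posted page covers 4096 / 64 = 64 rpis, which is why
 * start_rpi advances in 64-rpi steps from page to page.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, hdr_size = 64;

	printf("rpi headers per page: %u\n", page_size / hdr_size); /* 64 */
	return 0;
}
#endif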
19497
19498 /**
19499 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19500 * @phba: pointer to lpfc hba data structure.
19501 * @rpi_page: pointer to the rpi memory region.
19502 *
19503 * This routine is invoked to post a single rpi header to the
19504 * HBA consistent with the SLI-4 interface spec. This memory region
19505 * maps up to 64 rpi context regions.
19506 *
19507 * Return codes
19508 * 0 - successful
19509 * -ENOMEM - No available memory
19510 * -EIO - The mailbox failed to complete successfully.
19511 **/
19512 int
19513 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19514 {
19515 LPFC_MBOXQ_t *mboxq;
19516 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19517 uint32_t rc = 0;
19518 uint32_t shdr_status, shdr_add_status;
19519 union lpfc_sli4_cfg_shdr *shdr;
19520
19521 /* SLI4 ports that support extents do not require RPI headers. */
19522 if (!phba->sli4_hba.rpi_hdrs_in_use)
19523 return rc;
19524 if (phba->sli4_hba.extents_in_use)
19525 return -EIO;
19526
19527 /* The port is notified of the header region via a mailbox command. */
19528 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19529 if (!mboxq) {
19530 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19531 "2001 Unable to allocate memory for issuing "
19532 "SLI_CONFIG_SPECIAL mailbox command\n");
19533 return -ENOMEM;
19534 }
19535
19536 /* Post all rpi memory regions to the port. */
19537 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19538 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19539 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19540 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19541 sizeof(struct lpfc_sli4_cfg_mhdr),
19542 LPFC_SLI4_MBX_EMBED);
19543
19545 /* Post the physical rpi to the port for this rpi header. */
19546 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19547 rpi_page->start_rpi);
19548 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19549 hdr_tmpl, rpi_page->page_count);
19550
19551 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19552 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19553 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19554 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19555 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19556 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19557 mempool_free(mboxq, phba->mbox_mem_pool);
19558 if (shdr_status || shdr_add_status || rc) {
19559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19560 "2514 POST_RPI_HDR mailbox failed with "
19561 "status x%x add_status x%x, mbx status x%x\n",
19562 shdr_status, shdr_add_status, rc);
19563 rc = -ENXIO;
19564 } else {
19565 /*
19566  * The next_rpi stores the next logical modulo-64 rpi value used
19567 * to post physical rpis in subsequent rpi postings.
19568 */
19569 spin_lock_irq(&phba->hbalock);
19570 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19571 spin_unlock_irq(&phba->hbalock);
19572 }
19573 return rc;
19574 }
19575
19576 /**
19577 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19578 * @phba: pointer to lpfc hba data structure.
19579 *
19580  * This routine is invoked to allocate the next available rpi from the
19581  * driver's rpi bitmask. If the number of remaining posted rpi resources
19582  * drops below the low water mark, it also creates and posts an additional
19583  * rpi header page to the port.
19584 *
19585 * Returns
19586 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19587 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
19588 **/
19589 int
19590 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19591 {
19592 unsigned long rpi;
19593 uint16_t max_rpi, rpi_limit;
19594 uint16_t rpi_remaining, lrpi = 0;
19595 struct lpfc_rpi_hdr *rpi_hdr;
19596 unsigned long iflag;
19597
19598 /*
19599 * Fetch the next logical rpi. Because this index is logical,
19600 * the driver starts at 0 each time.
19601 */
19602 spin_lock_irqsave(&phba->hbalock, iflag);
19603 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19604 rpi_limit = phba->sli4_hba.next_rpi;
19605
19606 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19607 if (rpi >= rpi_limit)
19608 rpi = LPFC_RPI_ALLOC_ERROR;
19609 else {
19610 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19611 phba->sli4_hba.max_cfg_param.rpi_used++;
19612 phba->sli4_hba.rpi_count++;
19613 }
19614 lpfc_printf_log(phba, KERN_INFO,
19615 LOG_NODE | LOG_DISCOVERY,
19616 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19617 (int) rpi, max_rpi, rpi_limit);
19618
19619 /*
19620 * Don't try to allocate more rpi header regions if the device limit
19621 * has been exhausted.
19622 */
19623 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19624 (phba->sli4_hba.rpi_count >= max_rpi)) {
19625 spin_unlock_irqrestore(&phba->hbalock, iflag);
19626 return rpi;
19627 }
19628
19629 /*
19630 * RPI header postings are not required for SLI4 ports capable of
19631 * extents.
19632 */
19633 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19634 spin_unlock_irqrestore(&phba->hbalock, iflag);
19635 return rpi;
19636 }
19637
19638 /*
19639 * If the driver is running low on rpi resources, allocate another
19640  * page now. Note that the next_rpi value is used because
19641  * it represents how many are actually in use whereas max_rpi notes
19642  * the maximum number supported by the device.
19643 */
19644 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19645 spin_unlock_irqrestore(&phba->hbalock, iflag);
19646 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19647 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19648 if (!rpi_hdr) {
19649 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19650 "2002 Error Could not grow rpi "
19651 "count\n");
19652 } else {
19653 lrpi = rpi_hdr->start_rpi;
19654 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19655 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19656 }
19657 }
19658
19659 return rpi;
19660 }
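
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): a
 * minimal model of the allocator above - find the first clear bit below the
 * posted limit, mark it used, and grow the posted range when the number of
 * free ids drops under a low water mark. Sizes and names are local to the
 * example; the real code uses find_first_zero_bit()/set_bit() under
 * phba->hbalock.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define NBITS		128
#define PAGE_IDS	16	/* ids added per "posted header page" */
#define LOW_WATER	4

static uint64_t bmask[NBITS / 64];
static unsigned int limit = PAGE_IDS;	/* models sli4_hba.next_rpi */
static unsigned int used;		/* models sli4_hba.rpi_count */

static int find_first_zero(unsigned int lim)
{
	unsigned int i;

	for (i = 0; i < lim; i++)
		if (!(bmask[i / 64] & (1ULL << (i % 64))))
			return i;
	return -1;
}

static int alloc_id(void)
{
	int id = find_first_zero(limit);

	if (id < 0)
		return -1;	/* stands in for LPFC_RPI_ALLOC_ERROR */
	bmask[id / 64] |= 1ULL << (id % 64);
	used++;
	/* Running low: extend the usable range, as posting a page does. */
	if (limit - used < LOW_WATER && limit + PAGE_IDS <= NBITS)
		limit += PAGE_IDS;
	return id;
}

int main(void)
{
	printf("first id: %d\n", alloc_id());	/* 0 */
	return 0;
}
#endif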
19661
19662 /**
19663 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19664 * @phba: pointer to lpfc hba data structure.
19665 * @rpi: rpi to free
19666 *
19667 * This routine is invoked to release an rpi to the pool of
19668 * available rpis maintained by the driver.
19669 **/
19670 static void
19671 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19672 {
19673 /*
19674 * if the rpi value indicates a prior unreg has already
19675 * been done, skip the unreg.
19676 */
19677 if (rpi == LPFC_RPI_ALLOC_ERROR)
19678 return;
19679
19680 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19681 phba->sli4_hba.rpi_count--;
19682 phba->sli4_hba.max_cfg_param.rpi_used--;
19683 } else {
19684 lpfc_printf_log(phba, KERN_INFO,
19685 LOG_NODE | LOG_DISCOVERY,
19686 "2016 rpi %x not inuse\n",
19687 rpi);
19688 }
19689 }
19690
19691 /**
19692 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19693 * @phba: pointer to lpfc hba data structure.
19694 * @rpi: rpi to free
19695 *
19696 * This routine is invoked to release an rpi to the pool of
19697 * available rpis maintained by the driver.
19698 **/
19699 void
19700 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19701 {
19702 spin_lock_irq(&phba->hbalock);
19703 __lpfc_sli4_free_rpi(phba, rpi);
19704 spin_unlock_irq(&phba->hbalock);
19705 }
19706
19707 /**
19708 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19709 * @phba: pointer to lpfc hba data structure.
19710 *
19711 * This routine is invoked to remove the memory region that
19712 * provided rpi via a bitmask.
19713 **/
19714 void
19715 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19716 {
19717 kfree(phba->sli4_hba.rpi_bmask);
19718 kfree(phba->sli4_hba.rpi_ids);
19719 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19720 }
19721
19722 /**
19723  * lpfc_sli4_resume_rpi - Resume an rpi with the port
19724 * @ndlp: pointer to lpfc nodelist data structure.
19725 * @cmpl: completion call-back.
19726 * @arg: data to load as MBox 'caller buffer information'
19727 *
19728  * This routine is invoked to issue a RESUME_RPI mailbox command to the
19729  * port for the rpi associated with @ndlp.
19730 **/
19731 int
19732 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19733 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19734 {
19735 LPFC_MBOXQ_t *mboxq;
19736 struct lpfc_hba *phba = ndlp->phba;
19737 int rc;
19738
19739 /* The port is notified of the header region via a mailbox command. */
19740 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19741 if (!mboxq)
19742 return -ENOMEM;
19743
19744 /* If cmpl assigned, then this nlp_get pairs with
19745 * lpfc_mbx_cmpl_resume_rpi.
19746 *
19747 * Else cmpl is NULL, then this nlp_get pairs with
19748 * lpfc_sli_def_mbox_cmpl.
19749 */
19750 if (!lpfc_nlp_get(ndlp)) {
19751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19752 "2122 %s: Failed to get nlp ref\n",
19753 __func__);
19754 mempool_free(mboxq, phba->mbox_mem_pool);
19755 return -EIO;
19756 }
19757
19758 /* Post all rpi memory regions to the port. */
19759 lpfc_resume_rpi(mboxq, ndlp);
19760 if (cmpl) {
19761 mboxq->mbox_cmpl = cmpl;
19762 mboxq->ctx_buf = arg;
19763 } else
19764 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19765 mboxq->ctx_ndlp = ndlp;
19766 mboxq->vport = ndlp->vport;
19767 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19768 if (rc == MBX_NOT_FINISHED) {
19769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19770 "2010 Resume RPI Mailbox failed "
19771 "status %d, mbxStatus x%x\n", rc,
19772 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19773 lpfc_nlp_put(ndlp);
19774 mempool_free(mboxq, phba->mbox_mem_pool);
19775 return -EIO;
19776 }
19777 return 0;
19778 }
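
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): it
 * models the reference pairing documented above - take a node reference
 * before issuing the asynchronous command, and release it exactly once,
 * either in the completion handler or on the submit failure path. All
 * names are local to the example.
 */
#if 0
#include <stdio.h>

static int refcnt = 1;

static int get_ref(void) { return refcnt ? ++refcnt : 0; }
static void put_ref(void) { --refcnt; }

/* Completion path: releases the reference taken at submit time. */
static void cmpl(void) { put_ref(); }

static int submit(int will_fail)
{
	if (!get_ref())
		return -1;	/* node already gone: nothing issued */
	if (will_fail) {
		put_ref();	/* undo the pairing on submit failure */
		return -1;
	}
	cmpl();		/* in the driver this runs later, asynchronously */
	return 0;
}

int main(void)
{
	submit(0);
	submit(1);
	printf("refcnt %d\n", refcnt);	/* 1: balanced either way */
	return 0;
}
#endif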
19779
19780 /**
19781 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19782 * @vport: Pointer to the vport for which the vpi is being initialized
19783 *
19784 * This routine is invoked to activate a vpi with the port.
19785 *
19786 * Returns:
19787 * 0 success
19788  * negative errno value otherwise
19789 **/
19790 int
19791 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19792 {
19793 LPFC_MBOXQ_t *mboxq;
19794 int rc = 0;
19795 int retval = MBX_SUCCESS;
19796 uint32_t mbox_tmo;
19797 struct lpfc_hba *phba = vport->phba;
19798 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19799 if (!mboxq)
19800 return -ENOMEM;
19801 lpfc_init_vpi(phba, mboxq, vport->vpi);
19802 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19803 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19804 if (rc != MBX_SUCCESS) {
19805 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19806 "2022 INIT VPI Mailbox failed "
19807 "status %d, mbxStatus x%x\n", rc,
19808 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19809 retval = -EIO;
19810 }
19811 if (rc != MBX_TIMEOUT)
19812 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19813
19814 return retval;
19815 }
19816
19817 /**
19818 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19819 * @phba: pointer to lpfc hba data structure.
19820 * @mboxq: Pointer to mailbox object.
19821 *
19822 * This routine is invoked to manually add a single FCF record. The caller
19823 * must pass a completely initialized FCF_Record. This routine takes
19824 * care of the nonembedded mailbox operations.
19825 **/
19826 static void
19827 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19828 {
19829 void *virt_addr;
19830 union lpfc_sli4_cfg_shdr *shdr;
19831 uint32_t shdr_status, shdr_add_status;
19832
19833 virt_addr = mboxq->sge_array->addr[0];
19834 /* The IOCTL status is embedded in the mailbox subheader. */
19835 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19836 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19837 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19838
19839 if ((shdr_status || shdr_add_status) &&
19840 (shdr_status != STATUS_FCF_IN_USE))
19841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19842 "2558 ADD_FCF_RECORD mailbox failed with "
19843 "status x%x add_status x%x\n",
19844 shdr_status, shdr_add_status);
19845
19846 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19847 }
19848
19849 /**
19850 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19851 * @phba: pointer to lpfc hba data structure.
19852 * @fcf_record: pointer to the initialized fcf record to add.
19853 *
19854 * This routine is invoked to manually add a single FCF record. The caller
19855 * must pass a completely initialized FCF_Record. This routine takes
19856 * care of the nonembedded mailbox operations.
19857 **/
19858 int
19859 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19860 {
19861 int rc = 0;
19862 LPFC_MBOXQ_t *mboxq;
19863 uint8_t *bytep;
19864 void *virt_addr;
19865 struct lpfc_mbx_sge sge;
19866 uint32_t alloc_len, req_len;
19867 uint32_t fcfindex;
19868
19869 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19870 if (!mboxq) {
19871 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19872 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19873 return -ENOMEM;
19874 }
19875
19876 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19877 sizeof(uint32_t);
19878
19879 /* Allocate DMA memory and set up the non-embedded mailbox command */
19880 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19881 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19882 req_len, LPFC_SLI4_MBX_NEMBED);
19883 if (alloc_len < req_len) {
19884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19885 "2523 Allocated DMA memory size (x%x) is "
19886 "less than the requested DMA memory "
19887 "size (x%x)\n", alloc_len, req_len);
19888 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19889 return -ENOMEM;
19890 }
19891
19892 /*
19893 * Get the first SGE entry from the non-embedded DMA memory. This
19894 * routine only uses a single SGE.
19895 */
19896 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19897 virt_addr = mboxq->sge_array->addr[0];
19898 /*
19899 * Configure the FCF record for FCFI 0. This is the driver's
19900  * hardcoded default and gets used in non-FIP mode.
19901 */
19902 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19903 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19904 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19905
19906 /*
19907 * Copy the fcf_index and the FCF Record Data. The data starts after
19908 * the FCoE header plus word10. The data copy needs to be endian
19909 * correct.
19910 */
19911 bytep += sizeof(uint32_t);
19912 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19913 mboxq->vport = phba->pport;
19914 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19915 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19916 if (rc == MBX_NOT_FINISHED) {
19917 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19918 "2515 ADD_FCF_RECORD mailbox failed with "
19919 "status 0x%x\n", rc);
19920 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19921 rc = -EIO;
19922 } else
19923 rc = 0;
19924
19925 return rc;
19926 }
19927
19928 /**
19929 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19930 * @phba: pointer to lpfc hba data structure.
19931 * @fcf_record: pointer to the fcf record to write the default data.
19932 * @fcf_index: FCF table entry index.
19933 *
19934 * This routine is invoked to build the driver's default FCF record. The
19935 * values used are hardcoded. This routine handles memory initialization.
19936 *
19937 **/
19938 void
19939 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19940 struct fcf_record *fcf_record,
19941 uint16_t fcf_index)
19942 {
19943 memset(fcf_record, 0, sizeof(struct fcf_record));
19944 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19945 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19946 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19947 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19948 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19949 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19950 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19951 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19952 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19953 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19954 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19955 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19956 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19957 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19958 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19959 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19960 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19961 /* Set the VLAN bit map */
19962 if (phba->valid_vlan) {
19963 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19964 = 1 << (phba->vlan_id % 8);
19965 }
19966 }
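
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): the
 * VLAN bitmap indexing used above - byte index id / 8, bit index id % 8.
 * The |= form below is the general one; the plain assignment above is safe
 * only because the record was just zeroed and a single vlan id is set.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vlan_bitmap[512] = { 0 };
	unsigned int vlan_id = 100;

	vlan_bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
	printf("byte %u = 0x%02x\n", vlan_id / 8,
	       vlan_bitmap[vlan_id / 8]);	/* byte 12 = 0x10 */
	return 0;
}
#endif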
19967
19968 /**
19969 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19970 * @phba: pointer to lpfc hba data structure.
19971 * @fcf_index: FCF table entry offset.
19972 *
19973 * This routine is invoked to scan the entire FCF table by reading FCF
19974 * record and processing it one at a time starting from the @fcf_index
19975 * for initial FCF discovery or fast FCF failover rediscovery.
19976 *
19977  * Return 0 if the mailbox command is submitted successfully, nonzero
19978 * otherwise.
19979 **/
19980 int
19981 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19982 {
19983 int rc = 0, error;
19984 LPFC_MBOXQ_t *mboxq;
19985
19986 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19987 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19988 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19989 if (!mboxq) {
19990 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19991 "2000 Failed to allocate mbox for "
19992 "READ_FCF cmd\n");
19993 error = -ENOMEM;
19994 goto fail_fcf_scan;
19995 }
19996 /* Construct the read FCF record mailbox command */
19997 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19998 if (rc) {
19999 error = -EINVAL;
20000 goto fail_fcf_scan;
20001 }
20002 /* Issue the mailbox command asynchronously */
20003 mboxq->vport = phba->pport;
20004 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20005
20006 spin_lock_irq(&phba->hbalock);
20007 phba->hba_flag |= FCF_TS_INPROG;
20008 spin_unlock_irq(&phba->hbalock);
20009
20010 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20011 if (rc == MBX_NOT_FINISHED)
20012 error = -EIO;
20013 else {
20014 /* Reset eligible FCF count for new scan */
20015 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20016 phba->fcf.eligible_fcf_cnt = 0;
20017 error = 0;
20018 }
20019 fail_fcf_scan:
20020 if (error) {
20021 if (mboxq)
20022 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20023 /* FCF scan failed, clear FCF_TS_INPROG flag */
20024 spin_lock_irq(&phba->hbalock);
20025 phba->hba_flag &= ~FCF_TS_INPROG;
20026 spin_unlock_irq(&phba->hbalock);
20027 }
20028 return error;
20029 }
20030
20031 /**
20032 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20033 * @phba: pointer to lpfc hba data structure.
20034 * @fcf_index: FCF table entry offset.
20035 *
20036 * This routine is invoked to read an FCF record indicated by @fcf_index
20037 * and to use it for FLOGI roundrobin FCF failover.
20038 *
20039  * Return 0 if the mailbox command is submitted successfully, nonzero
20040 * otherwise.
20041 **/
20042 int
20043 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20044 {
20045 int rc = 0, error;
20046 LPFC_MBOXQ_t *mboxq;
20047
20048 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20049 if (!mboxq) {
20050 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20051 "2763 Failed to allocate mbox for "
20052 "READ_FCF cmd\n");
20053 error = -ENOMEM;
20054 goto fail_fcf_read;
20055 }
20056 /* Construct the read FCF record mailbox command */
20057 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20058 if (rc) {
20059 error = -EINVAL;
20060 goto fail_fcf_read;
20061 }
20062 /* Issue the mailbox command asynchronously */
20063 mboxq->vport = phba->pport;
20064 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20065 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20066 if (rc == MBX_NOT_FINISHED)
20067 error = -EIO;
20068 else
20069 error = 0;
20070
20071 fail_fcf_read:
20072 if (error && mboxq)
20073 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20074 return error;
20075 }
20076
20077 /**
20078 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20079 * @phba: pointer to lpfc hba data structure.
20080 * @fcf_index: FCF table entry offset.
20081 *
20082 * This routine is invoked to read an FCF record indicated by @fcf_index to
20083  * determine whether it's eligible for the FLOGI roundrobin failover list.
20084 *
20085  * Return 0 if the mailbox command is submitted successfully, nonzero
20086 * otherwise.
20087 **/
20088 int
20089 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20090 {
20091 int rc = 0, error;
20092 LPFC_MBOXQ_t *mboxq;
20093
20094 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20095 if (!mboxq) {
20096 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20097 "2758 Failed to allocate mbox for "
20098 "READ_FCF cmd\n");
20099 error = -ENOMEM;
20100 goto fail_fcf_read;
20101 }
20102 /* Construct the read FCF record mailbox command */
20103 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20104 if (rc) {
20105 error = -EINVAL;
20106 goto fail_fcf_read;
20107 }
20108 /* Issue the mailbox command asynchronously */
20109 mboxq->vport = phba->pport;
20110 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20111 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20112 if (rc == MBX_NOT_FINISHED)
20113 error = -EIO;
20114 else
20115 error = 0;
20116
20117 fail_fcf_read:
20118 if (error && mboxq)
20119 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20120 return error;
20121 }
20122
20123 /**
20124  * lpfc_check_next_fcf_pri_level - Populate rr_bmask with the next priority group
20125  * @phba: pointer to the lpfc_hba struct for this port.
20126  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20127  * routine when the rr_bmask is empty. FCF indices are put into the
20128  * rr_bmask based on their priority level, starting from the highest priority
20129  * and working down to the lowest. The most likely FCF candidate will be in the
20130  * highest priority group. When this routine is called it searches the fcf_pri
20131  * list for the next lowest priority group and repopulates the rr_bmask with
20132  * only those fcf_indexes.
20133 * returns:
20134 * 1=success 0=failure
20135 **/
20136 static int
20137 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20138 {
20139 uint16_t next_fcf_pri;
20140 uint16_t last_index;
20141 struct lpfc_fcf_pri *fcf_pri;
20142 int rc;
20143 int ret = 0;
20144
20145 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20146 LPFC_SLI4_FCF_TBL_INDX_MAX);
20147 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20148 "3060 Last IDX %d\n", last_index);
20149
20150 /* Verify the priority list has 2 or more entries */
20151 spin_lock_irq(&phba->hbalock);
20152 if (list_empty(&phba->fcf.fcf_pri_list) ||
20153 list_is_singular(&phba->fcf.fcf_pri_list)) {
20154 spin_unlock_irq(&phba->hbalock);
20155 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20156 "3061 Last IDX %d\n", last_index);
20157 return 0; /* Empty rr list */
20158 }
20159 spin_unlock_irq(&phba->hbalock);
20160
20161 next_fcf_pri = 0;
20162 /*
20163 * Clear the rr_bmask and set all of the bits that are at this
20164 * priority.
20165 */
20166 memset(phba->fcf.fcf_rr_bmask, 0,
20167 sizeof(*phba->fcf.fcf_rr_bmask));
20168 spin_lock_irq(&phba->hbalock);
20169 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20170 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20171 continue;
20172 /*
20173  * the first priority level that has not failed
20174  * FLOGI will be the highest.
20175 */
20176 if (!next_fcf_pri)
20177 next_fcf_pri = fcf_pri->fcf_rec.priority;
20178 spin_unlock_irq(&phba->hbalock);
20179 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20180 rc = lpfc_sli4_fcf_rr_index_set(phba,
20181 fcf_pri->fcf_rec.fcf_index);
20182 if (rc)
20183 return 0;
20184 }
20185 spin_lock_irq(&phba->hbalock);
20186 }
20187 /*
20188  * If next_fcf_pri was not set above and the list is not empty, then
20189  * FLOGI has failed on all of the entries. So clear the FLOGI-failed
20190  * flag on each and start at the beginning.
20191 */
20192 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20193 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20194 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20195 /*
20196  * the first priority level that has not failed
20197  * FLOGI will be the highest.
20198 */
20199 if (!next_fcf_pri)
20200 next_fcf_pri = fcf_pri->fcf_rec.priority;
20201 spin_unlock_irq(&phba->hbalock);
20202 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20203 rc = lpfc_sli4_fcf_rr_index_set(phba,
20204 fcf_pri->fcf_rec.fcf_index);
20205 if (rc)
20206 return 0;
20207 }
20208 spin_lock_irq(&phba->hbalock);
20209 }
20210 } else
20211 ret = 1;
20212 spin_unlock_irq(&phba->hbalock);
20213
20214 return ret;
20215 }
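
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): the
 * two-pass selection above, modeled over an array instead of the driver's
 * priority-sorted fcf_pri list - pick the best priority among entries that
 * have not failed FLOGI, then collect every index at that priority into the
 * roundrobin mask. Taking the minimum here matches taking the first entry
 * of a sorted list. All names are local to the example.
 */
#if 0
#include <stdio.h>

struct fcf_ent { unsigned int index, priority, flogi_failed; };

static unsigned int repopulate(const struct fcf_ent *e, int n,
			       unsigned long *mask)
{
	unsigned int next_pri = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (e[i].flogi_failed)
			continue;
		if (!next_pri || e[i].priority < next_pri)
			next_pri = e[i].priority;
	}
	for (i = 0; i < n; i++)
		if (!e[i].flogi_failed && e[i].priority == next_pri)
			*mask |= 1UL << e[i].index;
	return next_pri;
}

int main(void)
{
	struct fcf_ent tbl[] = { {0, 2, 0}, {1, 1, 1}, {2, 2, 0} };
	unsigned long mask = 0;

	/* Entry 1 failed FLOGI, so priority 2 wins: mask 0x5. */
	printf("pri %u mask 0x%lx\n", repopulate(tbl, 3, &mask), mask);
	return 0;
}
#endif
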
20216 /**
20217 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20218 * @phba: pointer to lpfc hba data structure.
20219 *
20220 * This routine is to get the next eligible FCF record index in a round
20221  * robin fashion. If the next eligible FCF record index equals the
20222 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20223 * shall be returned, otherwise, the next eligible FCF record's index
20224 * shall be returned.
20225 **/
20226 uint16_t
20227 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20228 {
20229 uint16_t next_fcf_index;
20230
20231 initial_priority:
20232 /* Search start from next bit of currently registered FCF index */
20233 next_fcf_index = phba->fcf.current_rec.fcf_indx;
20234
20235 next_priority:
20236 /* Determine the next fcf index to check */
20237 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20238 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20239 LPFC_SLI4_FCF_TBL_INDX_MAX,
20240 next_fcf_index);
20241
20242 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20243 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20244 /*
20245 * If we have wrapped then we need to clear the bits that
20246 * have been tested so that we can detect when we should
20247 * change the priority level.
20248 */
20249 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20250 LPFC_SLI4_FCF_TBL_INDX_MAX);
20251 }
20252
20254 /* Check roundrobin failover list empty condition */
20255 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20256 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20257 /*
20258  * If the next fcf index is not found, check if there are lower
20259  * priority level fcf's in the fcf_pri list.
20260  * Set up the rr_bmask with all of the available fcf bits
20261  * at that level and continue the selection process.
20262 */
20263 if (lpfc_check_next_fcf_pri_level(phba))
20264 goto initial_priority;
20265 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20266 "2844 No roundrobin failover FCF available\n");
20267
20268 return LPFC_FCOE_FCF_NEXT_NONE;
20269 }
20270
20271 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20272 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20273 LPFC_FCF_FLOGI_FAILED) {
20274 if (list_is_singular(&phba->fcf.fcf_pri_list))
20275 return LPFC_FCOE_FCF_NEXT_NONE;
20276
20277 goto next_priority;
20278 }
20279
20280 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20281 "2845 Get next roundrobin failover FCF (x%x)\n",
20282 next_fcf_index);
20283
20284 return next_fcf_index;
20285 }
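
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): the
 * wrap-around walk above - search the mask from current + 1, wrap to the
 * first set bit on overflow, and report "none" when the search lands back
 * on the currently registered index. next_set_bit() stands in for
 * find_next_bit(); sizes and names are local to the example.
 */
#if 0
#include <stdio.h>

#define TBL_MAX 32	/* stands in for LPFC_SLI4_FCF_TBL_INDX_MAX */

static int next_set_bit(unsigned long mask, int from)
{
	int i;

	for (i = from; i < TBL_MAX; i++)
		if (mask & (1UL << i))
			return i;
	return TBL_MAX;	/* "not found", like find_next_bit() */
}

static int rr_next(unsigned long mask, int current_idx)
{
	int next = next_set_bit(mask, (current_idx + 1) % TBL_MAX);

	if (next >= TBL_MAX)
		next = next_set_bit(mask, 0);	/* wrap around */
	if (next >= TBL_MAX || next == current_idx)
		return -1;	/* stands in for LPFC_FCOE_FCF_NEXT_NONE */
	return next;
}

int main(void)
{
	unsigned long mask = (1UL << 3) | (1UL << 9);

	printf("%d %d %d\n", rr_next(mask, 3), rr_next(mask, 9),
	       rr_next(1UL << 3, 3));	/* 9 3 -1 */
	return 0;
}
#endif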
20286
20287 /**
20288 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20289 * @phba: pointer to lpfc hba data structure.
20290 * @fcf_index: index into the FCF table to 'set'
20291 *
20292 * This routine sets the FCF record index in to the eligible bmask for
20293 * roundrobin failover search. It checks to make sure that the index
20294 * does not go beyond the range of the driver allocated bmask dimension
20295 * before setting the bit.
20296 *
20297  * Returns 0 if the index bit is successfully set; otherwise, it returns
20298 * -EINVAL.
20299 **/
20300 int
20301 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20302 {
20303 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20304 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20305 "2610 FCF (x%x) reached driver's book "
20306 "keeping dimension:x%x\n",
20307 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20308 return -EINVAL;
20309 }
20310 /* Set the eligible FCF record index bmask */
20311 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20312
20313 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20314 "2790 Set FCF (x%x) to roundrobin FCF failover "
20315 "bmask\n", fcf_index);
20316
20317 return 0;
20318 }
20319
20320 /**
20321 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20322 * @phba: pointer to lpfc hba data structure.
20323 * @fcf_index: index into the FCF table to 'clear'
20324 *
20325 * This routine clears the FCF record index from the eligible bmask for
20326 * roundrobin failover search. It checks to make sure that the index
20327 * does not go beyond the range of the driver allocated bmask dimension
20328 * before clearing the bit.
20329 **/
20330 void
20331 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20332 {
20333 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20334 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20335 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20336 "2762 FCF (x%x) reached driver's book "
20337 "keeping dimension:x%x\n",
20338 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20339 return;
20340 }
20341 /* Clear the eligible FCF record index bmask */
20342 spin_lock_irq(&phba->hbalock);
20343 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20344 list) {
20345 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20346 list_del_init(&fcf_pri->list);
20347 break;
20348 }
20349 }
20350 spin_unlock_irq(&phba->hbalock);
20351 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20352
20353 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20354 "2791 Clear FCF (x%x) from roundrobin failover "
20355 "bmask\n", fcf_index);
20356 }
20357
20358 /**
20359 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20360 * @phba: pointer to lpfc hba data structure.
20361 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20362 *
20363 * This routine is the completion routine for the rediscover FCF table mailbox
20364 * command. If the mailbox command returned failure, it will try to stop the
20365 * FCF rediscover wait timer.
20366 **/
20367 static void
20368 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20369 {
20370 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20371 uint32_t shdr_status, shdr_add_status;
20372
20373 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20374
20375 shdr_status = bf_get(lpfc_mbox_hdr_status,
20376 &redisc_fcf->header.cfg_shdr.response);
20377 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20378 &redisc_fcf->header.cfg_shdr.response);
20379 if (shdr_status || shdr_add_status) {
20380 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20381 "2746 Requesting for FCF rediscovery failed "
20382 "status x%x add_status x%x\n",
20383 shdr_status, shdr_add_status);
20384 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20385 spin_lock_irq(&phba->hbalock);
20386 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20387 spin_unlock_irq(&phba->hbalock);
20388 /*
20389 * CVL event triggered FCF rediscover request failed,
20390 * last resort to re-try current registered FCF entry.
20391 */
20392 lpfc_retry_pport_discovery(phba);
20393 } else {
20394 spin_lock_irq(&phba->hbalock);
20395 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20396 spin_unlock_irq(&phba->hbalock);
20397 /*
20398 * DEAD FCF event triggered FCF rediscover request
20399 * failed, last resort to fail over as a link down
20400 * to FCF registration.
20401 */
20402 lpfc_sli4_fcf_dead_failthrough(phba);
20403 }
20404 } else {
20405 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20406 "2775 Start FCF rediscover quiescent timer\n");
20407 /*
20408  * Start FCF rediscovery wait timer for the pending FCF
20409  * rediscovery before rescanning the FCF record table.
20410 */
20411 lpfc_fcf_redisc_wait_start_timer(phba);
20412 }
20413
20414 mempool_free(mbox, phba->mbox_mem_pool);
20415 }
20416
20417 /**
20418 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20419 * @phba: pointer to lpfc hba data structure.
20420 *
20421  * This routine is invoked to request rediscovery of the entire FCF table
20422 * by the port.
20423 **/
20424 int
20425 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20426 {
20427 LPFC_MBOXQ_t *mbox;
20428 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20429 int rc, length;
20430
20431 /* Cancel retry delay timers to all vports before FCF rediscover */
20432 lpfc_cancel_all_vport_retry_delay_timer(phba);
20433
20434 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20435 if (!mbox) {
20436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20437 "2745 Failed to allocate mbox for "
20438 "requesting FCF rediscover.\n");
20439 return -ENOMEM;
20440 }
20441
20442 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20443 sizeof(struct lpfc_sli4_cfg_mhdr));
20444 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20445 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20446 length, LPFC_SLI4_MBX_EMBED);
20447
20448 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20449 /* Set count to 0 for invalidating the entire FCF database */
20450 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20451
20452 /* Issue the mailbox command asynchronously */
20453 mbox->vport = phba->pport;
20454 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20456
20457 if (rc == MBX_NOT_FINISHED) {
20458 mempool_free(mbox, phba->mbox_mem_pool);
20459 return -EIO;
20460 }
20461 return 0;
20462 }
20463
20464 /**
20465 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20466 * @phba: pointer to lpfc hba data structure.
20467 *
20468 * This function is the failover routine as a last resort to the FCF DEAD
20469 * event when driver failed to perform fast FCF failover.
20470 **/
20471 void
20472 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20473 {
20474 uint32_t link_state;
20475
20476 /*
20477 * Last resort as FCF DEAD event failover will treat this as
20478 * a link down, but save the link state because we don't want
20479 * it to be changed to Link Down unless it is already down.
20480 */
20481 link_state = phba->link_state;
20482 lpfc_linkdown(phba);
20483 phba->link_state = link_state;
20484
20485 /* Unregister FCF if no devices connected to it */
20486 lpfc_unregister_unused_fcf(phba);
20487 }
20488
20489 /**
20490 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20491 * @phba: pointer to lpfc hba data structure.
20492 * @rgn23_data: pointer to configure region 23 data.
20493 *
20494 * This function gets SLI3 port configure region 23 data through memory dump
20495 * mailbox command. When it successfully retrieves data, the size of the data
20496 * will be returned, otherwise, 0 will be returned.
20497 **/
20498 static uint32_t
20499 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20500 {
20501 LPFC_MBOXQ_t *pmb = NULL;
20502 MAILBOX_t *mb;
20503 uint32_t offset = 0;
20504 int rc;
20505
20506 if (!rgn23_data)
20507 return 0;
20508
20509 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20510 if (!pmb) {
20511 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20512 "2600 failed to allocate mailbox memory\n");
20513 return 0;
20514 }
20515 mb = &pmb->u.mb;
20516
20517 do {
20518 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20519 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20520
20521 if (rc != MBX_SUCCESS) {
20522 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20523 "2601 failed to read config "
20524 "region 23, rc 0x%x Status 0x%x\n",
20525 rc, mb->mbxStatus);
20526 mb->un.varDmp.word_cnt = 0;
20527 }
20528 /*
20529  * The dump may return a zero count when finished or when we hit a
20530  * mailbox error; either way we are done.
20531 */
20532 if (mb->un.varDmp.word_cnt == 0)
20533 break;
20534
20535 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20536 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20537
20538 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20539 rgn23_data + offset,
20540 mb->un.varDmp.word_cnt);
20541 offset += mb->un.varDmp.word_cnt;
20542 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20543
20544 mempool_free(pmb, phba->mbox_mem_pool);
20545 return offset;
20546 }
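
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): the
 * accumulation loop above - fetch one chunk per iteration, clamp the
 * returned count to the space left in the region buffer, and stop on a
 * zero-length chunk or a full buffer. dump_chunk() fakes the mailbox round
 * trip; sizes and names are local to the example.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define RGN_SIZE 64	/* stands in for DMP_RGN23_SIZE */

static unsigned int dump_chunk(unsigned int offset, unsigned int total)
{
	unsigned int n = total - offset;

	return n > 16 ? 16 : n;	/* the returned "word_cnt" */
}

int main(void)
{
	char src[RGN_SIZE] = "region 23 contents";
	char dst[RGN_SIZE];
	unsigned int offset = 0, cnt;

	do {
		cnt = dump_chunk(offset, RGN_SIZE);
		if (cnt == 0)
			break;
		if (cnt > RGN_SIZE - offset)
			cnt = RGN_SIZE - offset;
		memcpy(dst + offset, src + offset, cnt);
		offset += cnt;
	} while (cnt && offset < RGN_SIZE);
	printf("copied %u bytes\n", offset);	/* 64 */
	return 0;
}
#endif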
20547
20548 /**
20549 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20550 * @phba: pointer to lpfc hba data structure.
20551 * @rgn23_data: pointer to configure region 23 data.
20552 *
20553 * This function gets SLI4 port configure region 23 data through memory dump
20554 * mailbox command. When it successfully retrieves data, the size of the data
20555 * will be returned, otherwise, 0 will be returned.
20556 **/
20557 static uint32_t
20558 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20559 {
20560 LPFC_MBOXQ_t *mboxq = NULL;
20561 struct lpfc_dmabuf *mp = NULL;
20562 struct lpfc_mqe *mqe;
20563 uint32_t data_length = 0;
20564 int rc;
20565
20566 if (!rgn23_data)
20567 return 0;
20568
20569 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20570 if (!mboxq) {
20571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20572 "3105 failed to allocate mailbox memory\n");
20573 return 0;
20574 }
20575
20576 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20577 goto out;
20578 mqe = &mboxq->u.mqe;
20579 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20580 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20581 if (rc)
20582 goto out;
20583 data_length = mqe->un.mb_words[5];
20584 if (data_length == 0)
20585 goto out;
20586 if (data_length > DMP_RGN23_SIZE) {
20587 data_length = 0;
20588 goto out;
20589 }
20590 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20591 out:
20592 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20593 return data_length;
20594 }
20595
20596 /**
20597 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20598 * @phba: pointer to lpfc hba data structure.
20599 *
20600  * This function reads region 23 and parses the TLVs for the port status
20601  * to decide if the user disabled the port. If the TLV indicates the
20602 * port is disabled, the hba_flag is set accordingly.
20603 **/
20604 void
20605 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20606 {
20607 uint8_t *rgn23_data = NULL;
20608 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20609 uint32_t offset = 0;
20610
20611 /* Get adapter Region 23 data */
20612 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20613 if (!rgn23_data)
20614 goto out;
20615
20616 if (phba->sli_rev < LPFC_SLI_REV4)
20617 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20618 else {
20619 if_type = bf_get(lpfc_sli_intf_if_type,
20620 &phba->sli4_hba.sli_intf);
20621 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20622 goto out;
20623 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20624 }
20625
20626 if (!data_size)
20627 goto out;
20628
20629 /* Check the region signature first */
20630 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20632 "2619 Config region 23 has bad signature\n");
20633 goto out;
20634 }
20635 offset += 4;
20636
20637 /* Check the data structure version */
20638 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20639 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20640 "2620 Config region 23 has bad version\n");
20641 goto out;
20642 }
20643 offset += 4;
20644
20645 /* Parse TLV entries in the region */
20646 while (offset < data_size) {
20647 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20648 break;
20649 /*
20650  * If the TLV is not a driver specific TLV or the driver id is
20651  * not the Linux driver id, skip the record.
20652 */
20653 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20654 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20655 (rgn23_data[offset + 3] != 0)) {
20656 offset += rgn23_data[offset + 1] * 4 + 4;
20657 continue;
20658 }
20659
20660 /* Driver found a driver specific TLV in the config region */
20661 sub_tlv_len = rgn23_data[offset + 1] * 4;
20662 offset += 4;
20663 tlv_offset = 0;
20664
20665 /*
20666 * Search for configured port state sub-TLV.
20667 */
20668 while ((offset < data_size) &&
20669 (tlv_offset < sub_tlv_len)) {
20670 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20671 offset += 4;
20672 tlv_offset += 4;
20673 break;
20674 }
20675 if (rgn23_data[offset] != PORT_STE_TYPE) {
20676 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20677 offset += rgn23_data[offset + 1] * 4 + 4;
20678 continue;
20679 }
20680
20681 /* This HBA contains PORT_STE configured */
20682 if (!rgn23_data[offset + 2])
20683 phba->hba_flag |= LINK_DISABLED;
20684
20685 goto out;
20686 }
20687 }
20688
20689 out:
20690 kfree(rgn23_data);
20691 return;
20692 }
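
/*
 * Illustrative, standalone sketch (kept out of the build with #if 0): the
 * region 23 record walk above - check a 4-byte signature and a version
 * byte, then step through [type][length-in-words][payload] records,
 * advancing length * 4 + 4 bytes each time until a LAST_REC terminator or
 * a matching type. The signature string, type values, and layout constants
 * here are made up for the example.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SIG		"RG23"
#define VERSION		0
#define LAST_REC	0xff
#define WANTED_TYPE	0xA2

static int find_tlv(const uint8_t *d, unsigned int size, unsigned int type)
{
	unsigned int off;

	if (size < 8 || memcmp(d, SIG, 4) || d[4] != VERSION)
		return -1;	/* bad signature or version */
	for (off = 8; off + 2 <= size; off += d[off + 1] * 4 + 4) {
		if (d[off] == LAST_REC)
			return -1;
		if (d[off] == type)
			return off;
	}
	return -1;
}

int main(void)
{
	uint8_t rgn[32] = { 'R', 'G', '2', '3', VERSION, 0, 0, 0,
			    0x11, 1, 0, 0, 0, 0, 0, 0,	/* skipped record */
			    WANTED_TYPE, 0, 0, 0,
			    LAST_REC };

	printf("offset %d\n", find_tlv(rgn, sizeof(rgn), WANTED_TYPE)); /* 16 */
	return 0;
}
#endif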
20693
20694 /**
20695 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20696 * @phba: pointer to lpfc hba data structure
20697 * @shdr_status: wr_object rsp's status field
20698 * @shdr_add_status: wr_object rsp's add_status field
20699 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20700 * @shdr_change_status: wr_object rsp's change_status field
20701 * @shdr_csf: wr_object rsp's csf bit
20702 *
20703 * This routine is intended to be called after a firmware write completes.
20704  * It will log the next action items to be performed by the user to
20705  * instantiate the newly downloaded firmware, or the reason for incompatibility.
20706 **/
20707 static void
20708 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20709 u32 shdr_add_status, u32 shdr_add_status_2,
20710 u32 shdr_change_status, u32 shdr_csf)
20711 {
20712 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20713 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20714 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20715 "change_status x%02x, csf %01x\n", __func__,
20716 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20717 shdr_status, shdr_add_status, shdr_add_status_2,
20718 shdr_change_status, shdr_csf);
20719
20720 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20721 switch (shdr_add_status_2) {
20722 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20723 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20724 "4199 Firmware write failed: "
20725 "image incompatible with flash x%02x\n",
20726 phba->sli4_hba.flash_id);
20727 break;
20728 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20729 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20730 "4200 Firmware write failed: "
20731 "image incompatible with ASIC "
20732 "architecture x%02x\n",
20733 phba->sli4_hba.asic_rev);
20734 break;
20735 default:
20736 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20737 "4210 Firmware write failed: "
20738 "add_status_2 x%02x\n",
20739 shdr_add_status_2);
20740 break;
20741 }
20742 } else if (!shdr_status && !shdr_add_status) {
20743 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20744 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20745 if (shdr_csf)
20746 shdr_change_status =
20747 LPFC_CHANGE_STATUS_PCI_RESET;
20748 }
20749
20750 switch (shdr_change_status) {
20751 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20752 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20753 "3198 Firmware write complete: System "
20754 "reboot required to instantiate\n");
20755 break;
20756 case (LPFC_CHANGE_STATUS_FW_RESET):
20757 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20758 "3199 Firmware write complete: "
20759 "Firmware reset required to "
20760 "instantiate\n");
20761 break;
20762 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20763 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20764 "3200 Firmware write complete: Port "
20765 "Migration or PCI Reset required to "
20766 "instantiate\n");
20767 break;
20768 case (LPFC_CHANGE_STATUS_PCI_RESET):
20769 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20770 "3201 Firmware write complete: PCI "
20771 "Reset required to instantiate\n");
20772 break;
20773 default:
20774 break;
20775 }
20776 }
20777 }
20778
20779 /**
20780 * lpfc_wr_object - write an object to the firmware
20781 * @phba: HBA structure that indicates port to create a queue on.
20782 * @dmabuf_list: list of dmabufs to write to the port.
20783 * @size: the total byte value of the objects to write to the port.
20784 * @offset: the current offset to be used to start the transfer.
20785 *
20786 * This routine will create a wr_object mailbox command to send to the port.
20787 * the mailbox command will be constructed using the dma buffers described in
20788 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20789  * BDEs as the embedded mailbox can support. The @offset variable will be
20790 * used to indicate the starting offset of the transfer and will also return
20791 * the offset after the write object mailbox has completed. @size is used to
20792 * determine the end of the object and whether the eof bit should be set.
20793 *
20794  * Return 0 if successful and @offset will contain the new offset to use
20795 * for the next write.
20796 * Return negative value for error cases.
20797 **/
20798 int
20799 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20800 uint32_t size, uint32_t *offset)
20801 {
20802 struct lpfc_mbx_wr_object *wr_object;
20803 LPFC_MBOXQ_t *mbox;
20804 int rc = 0, i = 0;
20805 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20806 uint32_t shdr_change_status = 0, shdr_csf = 0;
20807 uint32_t mbox_tmo;
20808 struct lpfc_dmabuf *dmabuf;
20809 uint32_t written = 0;
20810 bool check_change_status = false;
20811
20812 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20813 if (!mbox)
20814 return -ENOMEM;
20815
20816 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20817 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20818 sizeof(struct lpfc_mbx_wr_object) -
20819 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20820
20821 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20822 wr_object->u.request.write_offset = *offset;
20823 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20824 wr_object->u.request.object_name[0] =
20825 cpu_to_le32(wr_object->u.request.object_name[0]);
20826 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20827 list_for_each_entry(dmabuf, dmabuf_list, list) {
20828 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20829 break;
20830 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20831 wr_object->u.request.bde[i].addrHigh =
20832 putPaddrHigh(dmabuf->phys);
20833 if (written + SLI4_PAGE_SIZE >= size) {
20834 wr_object->u.request.bde[i].tus.f.bdeSize =
20835 (size - written);
20836 written += (size - written);
20837 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20838 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20839 check_change_status = true;
20840 } else {
20841 wr_object->u.request.bde[i].tus.f.bdeSize =
20842 SLI4_PAGE_SIZE;
20843 written += SLI4_PAGE_SIZE;
20844 }
20845 i++;
20846 }
20847 wr_object->u.request.bde_count = i;
20848 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20849 if (!phba->sli4_hba.intr_enable)
20850 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20851 else {
20852 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20853 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20854 }
20855 /* The IOCTL status is embedded in the mailbox subheader. */
20856 shdr_status = bf_get(lpfc_mbox_hdr_status,
20857 &wr_object->header.cfg_shdr.response);
20858 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20859 &wr_object->header.cfg_shdr.response);
20860 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20861 &wr_object->header.cfg_shdr.response);
20862 if (check_change_status) {
20863 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20864 &wr_object->u.response);
20865 shdr_csf = bf_get(lpfc_wr_object_csf,
20866 &wr_object->u.response);
20867 }
20868
20869 if (!phba->sli4_hba.intr_enable)
20870 mempool_free(mbox, phba->mbox_mem_pool);
20871 else if (rc != MBX_TIMEOUT)
20872 mempool_free(mbox, phba->mbox_mem_pool);
20873 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20875 "3025 Write Object mailbox failed with "
20876 "status x%x add_status x%x, add_status_2 x%x, "
20877 "mbx status x%x\n",
20878 shdr_status, shdr_add_status, shdr_add_status_2,
20879 rc);
20880 rc = -ENXIO;
20881 *offset = shdr_add_status;
20882 } else {
20883 *offset += wr_object->u.response.actual_write_length;
20884 }
20885
20886 if (rc || check_change_status)
20887 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20888 shdr_add_status_2, shdr_change_status,
20889 shdr_csf);
20890 return rc;
20891 }
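
/* Illustrative caller sketch (not part of the driver): lpfc_wr_object()
 * posts only as many BDEs as one embedded mailbox holds, so an image larger
 * than that is written by looping until @offset reaches the image size.
 * Buffer-list setup and error handling are elided; fw_size, fw_offset and
 * dma_buffer_list are hypothetical names.
 *
 *	uint32_t fw_offset = 0;
 *	int rc = 0;
 *
 *	while (fw_offset < fw_size) {
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_size - fw_offset, &fw_offset);
 *		if (rc)
 *			break;	// on error, fw_offset holds add_status
 *	}
 */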
20892
20893 /**
20894 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20895 * @vport: pointer to vport data structure.
20896 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
20901 **/
20902 void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20904 {
20905 struct lpfc_hba *phba = vport->phba;
20906 LPFC_MBOXQ_t *mb, *nextmb;
20907 struct lpfc_nodelist *ndlp;
20908 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20909 LIST_HEAD(mbox_cmd_list);
20910 uint8_t restart_loop;
20911
20912 /* Clean up internally queued mailbox commands with the vport */
20913 spin_lock_irq(&phba->hbalock);
20914 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20915 if (mb->vport != vport)
20916 continue;
20917
20918 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20919 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20920 continue;
20921
20922 list_move_tail(&mb->list, &mbox_cmd_list);
20923 }
20924 /* Clean up active mailbox command with the vport */
20925 mb = phba->sli.mbox_active;
20926 if (mb && (mb->vport == vport)) {
20927 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20928 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20929 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20930 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20931 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20932
20933 /* This reference is local to this routine. The
20934 * reference is removed at routine exit.
20935 */
20936 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20937
20938 /* Unregister the RPI when mailbox complete */
20939 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20940 }
20941 }
20942 /* Cleanup any mailbox completions which are not yet processed */
20943 do {
20944 restart_loop = 0;
20945 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20946 /*
			 * If this mailbox is already processed or it is
			 * for another vport, ignore it.
20949 */
20950 if ((mb->vport != vport) ||
20951 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20952 continue;
20953
20954 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20955 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20956 continue;
20957
20958 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20959 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20960 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20961 /* Unregister the RPI when mailbox complete */
20962 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20963 restart_loop = 1;
20964 spin_unlock_irq(&phba->hbalock);
20965 spin_lock(&ndlp->lock);
20966 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20967 spin_unlock(&ndlp->lock);
20968 spin_lock_irq(&phba->hbalock);
20969 break;
20970 }
20971 }
20972 } while (restart_loop);
20973
20974 spin_unlock_irq(&phba->hbalock);
20975
20976 /* Release the cleaned-up mailbox commands */
20977 while (!list_empty(&mbox_cmd_list)) {
20978 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20979 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20980 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20981 mb->ctx_ndlp = NULL;
20982 if (ndlp) {
20983 spin_lock(&ndlp->lock);
20984 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20985 spin_unlock(&ndlp->lock);
20986 lpfc_nlp_put(ndlp);
20987 }
20988 }
20989 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
20990 }
20991
20992 /* Release the ndlp with the cleaned-up active mailbox command */
20993 if (act_mbx_ndlp) {
20994 spin_lock(&act_mbx_ndlp->lock);
20995 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20996 spin_unlock(&act_mbx_ndlp->lock);
20997 lpfc_nlp_put(act_mbx_ndlp);
20998 }
20999 }
21000
21001 /**
21002 * lpfc_drain_txq - Drain the txq
21003 * @phba: Pointer to HBA context object.
21004 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs available. This congestion can occur with
 * large vport counts during node discovery.
21010 **/
21011
21012 uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
21014 {
21015 LIST_HEAD(completions);
21016 struct lpfc_sli_ring *pring;
21017 struct lpfc_iocbq *piocbq = NULL;
21018 unsigned long iflags = 0;
21019 char *fail_msg = NULL;
21020 uint32_t txq_cnt = 0;
21021 struct lpfc_queue *wq;
21022 int ret = 0;
21023
21024 if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to the first WQ */
21026 wq = phba->sli4_hba.hdwq[0].io_wq;
21027 if (unlikely(!wq))
21028 return 0;
21029 pring = wq->pring;
21030 } else {
21031 wq = phba->sli4_hba.els_wq;
21032 if (unlikely(!wq))
21033 return 0;
21034 pring = lpfc_phba_elsring(phba);
21035 }
21036
21037 if (unlikely(!pring) || list_empty(&pring->txq))
21038 return 0;
21039
21040 spin_lock_irqsave(&pring->ring_lock, iflags);
21041 list_for_each_entry(piocbq, &pring->txq, list) {
21042 txq_cnt++;
21043 }
21044
21045 if (txq_cnt > pring->txq_max)
21046 pring->txq_max = txq_cnt;
21047
21048 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21049
21050 while (!list_empty(&pring->txq)) {
21051 spin_lock_irqsave(&pring->ring_lock, iflags);
21052
21053 piocbq = lpfc_sli_ringtx_get(phba, pring);
21054 if (!piocbq) {
21055 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21057 "2823 txq empty and txq_cnt is %d\n ",
21058 txq_cnt);
21059 break;
21060 }
21061 txq_cnt--;
21062
21063 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21064
21065 if (ret && ret != IOCB_BUSY) {
21066 fail_msg = " - Cannot send IO ";
21067 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21068 }
21069 if (fail_msg) {
21070 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21071 /* Failed means we can't issue and need to cancel */
21072 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21073 "2822 IOCB failed %s iotag 0x%x "
21074 "xri 0x%x %d flg x%x\n",
21075 fail_msg, piocbq->iotag,
21076 piocbq->sli4_xritag, ret,
21077 piocbq->cmd_flag);
21078 list_add_tail(&piocbq->list, &completions);
21079 fail_msg = NULL;
21080 }
21081 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21082 if (txq_cnt == 0 || ret == IOCB_BUSY)
21083 break;
21084 }
21085 /* Cancel all the IOCBs that cannot be issued */
21086 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21087 IOERR_SLI_ABORTED);
21088
21089 return txq_cnt;
21090 }
21091
21092 /**
21093 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21094 * @phba: Pointer to HBA context object.
21095 * @pwqeq: Pointer to command WQE.
21096 * @sglq: Pointer to the scatter gather queue object.
21097 *
21098 * This routine converts the bpl or bde that is in the WQE
21099 * to a sgl list for the sli4 hardware. The physical address
21100 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the WQE contains a single
 * BDE then it is converted to a single sli4_sge.
21104 * The WQE is still in cpu endianness so the contents of
21105 * the bpl can be used without byte swapping.
21106 *
21107 * Returns valid XRI = Success, NO_XRI = Failure.
21108 */
21109 static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
21112 {
21113 uint16_t xritag = NO_XRI;
21114 struct ulp_bde64 *bpl = NULL;
21115 struct ulp_bde64 bde;
21116 struct sli4_sge *sgl = NULL;
21117 struct lpfc_dmabuf *dmabuf;
21118 union lpfc_wqe128 *wqe;
21119 int numBdes = 0;
21120 int i = 0;
21121 uint32_t offset = 0; /* accumulated offset in the sg request list */
21122 int inbound = 0; /* number of sg reply entries inbound from firmware */
21123 uint32_t cmd;
21124
21125 if (!pwqeq || !sglq)
21126 return xritag;
21127
21128 sgl = (struct sli4_sge *)sglq->sgl;
21129 wqe = &pwqeq->wqe;
21130 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21131
21132 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21133 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21134 return sglq->sli4_xritag;
21135 numBdes = pwqeq->num_bdes;
21136 if (numBdes) {
21137 /* The addrHigh and addrLow fields within the WQE
21138 * have not been byteswapped yet so there is no
21139 * need to swap them back.
21140 */
21141 if (pwqeq->bpl_dmabuf)
21142 dmabuf = pwqeq->bpl_dmabuf;
21143 else
21144 return xritag;
21145
21146 bpl = (struct ulp_bde64 *)dmabuf->virt;
21147 if (!bpl)
21148 return xritag;
21149
21150 for (i = 0; i < numBdes; i++) {
21151 /* Should already be byte swapped. */
21152 sgl->addr_hi = bpl->addrHigh;
21153 sgl->addr_lo = bpl->addrLow;
21154
21155 sgl->word2 = le32_to_cpu(sgl->word2);
21156 if ((i+1) == numBdes)
21157 bf_set(lpfc_sli4_sge_last, sgl, 1);
21158 else
21159 bf_set(lpfc_sli4_sge_last, sgl, 0);
21160 /* swap the size field back to the cpu so we
21161 * can assign it to the sgl.
21162 */
21163 bde.tus.w = le32_to_cpu(bpl->tus.w);
21164 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21165 /* The offsets in the sgl need to be accumulated
21166 * separately for the request and reply lists.
21167 * The request is always first, the reply follows.
21168 */
21169 switch (cmd) {
21170 case CMD_GEN_REQUEST64_WQE:
21171 /* add up the reply sg entries */
21172 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21173 inbound++;
21174 /* first inbound? reset the offset */
21175 if (inbound == 1)
21176 offset = 0;
21177 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21178 bf_set(lpfc_sli4_sge_type, sgl,
21179 LPFC_SGE_TYPE_DATA);
21180 offset += bde.tus.f.bdeSize;
21181 break;
21182 case CMD_FCP_TRSP64_WQE:
21183 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21184 bf_set(lpfc_sli4_sge_type, sgl,
21185 LPFC_SGE_TYPE_DATA);
21186 break;
21187 case CMD_FCP_TSEND64_WQE:
21188 case CMD_FCP_TRECEIVE64_WQE:
21189 bf_set(lpfc_sli4_sge_type, sgl,
21190 bpl->tus.f.bdeFlags);
21191 if (i < 3)
21192 offset = 0;
21193 else
21194 offset += bde.tus.f.bdeSize;
21195 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21196 break;
21197 }
21198 sgl->word2 = cpu_to_le32(sgl->word2);
21199 bpl++;
21200 sgl++;
21201 }
21202 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21203 /* The addrHigh and addrLow fields of the BDE have not
21204 * been byteswapped yet so they need to be swapped
21205 * before putting them in the sgl.
21206 */
21207 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21208 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21209 sgl->word2 = le32_to_cpu(sgl->word2);
21210 bf_set(lpfc_sli4_sge_last, sgl, 1);
21211 sgl->word2 = cpu_to_le32(sgl->word2);
21212 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21213 }
21214 return sglq->sli4_xritag;
21215 }
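
/* Endianness note (restating the conversion above): the BPL entries are
 * already little-endian in host memory, so addr_hi/addr_lo copy straight
 * across; only word2 (the flag/offset bits) and the length are round-tripped
 * through le32_to_cpu()/cpu_to_le32() so that bf_set() can operate on
 * CPU-order values.
 */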
21216
21217 /**
21218 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21219 * @phba: Pointer to HBA context object.
21220 * @qp: Pointer to HDW queue.
21221 * @pwqe: Pointer to command WQE.
21222 **/
21223 int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
21226 {
21227 union lpfc_wqe128 *wqe = &pwqe->wqe;
21228 struct lpfc_async_xchg_ctx *ctxp;
21229 struct lpfc_queue *wq;
21230 struct lpfc_sglq *sglq;
21231 struct lpfc_sli_ring *pring;
21232 unsigned long iflags;
21233 uint32_t ret = 0;
21234
21235 /* NVME_LS and NVME_LS ABTS requests. */
21236 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21237 pring = phba->sli4_hba.nvmels_wq->pring;
21238 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21239 qp, wq_access);
21240 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21241 if (!sglq) {
21242 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21243 return WQE_BUSY;
21244 }
21245 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21246 pwqe->sli4_xritag = sglq->sli4_xritag;
21247 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21248 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21249 return WQE_ERROR;
21250 }
21251 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21252 pwqe->sli4_xritag);
21253 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21254 if (ret) {
21255 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21256 return ret;
21257 }
21258
21259 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21260 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21261
21262 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21263 return 0;
21264 }
21265
21266 /* NVME_FCREQ and NVME_ABTS requests */
21267 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21268 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21269 wq = qp->io_wq;
21270 pring = wq->pring;
21271
21272 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21273
21274 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21275 qp, wq_access);
21276 ret = lpfc_sli4_wq_put(wq, wqe);
21277 if (ret) {
21278 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21279 return ret;
21280 }
21281 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21282 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21283
21284 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21285 return 0;
21286 }
21287
21288 /* NVMET requests */
21289 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21290 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21291 wq = qp->io_wq;
21292 pring = wq->pring;
21293
21294 ctxp = pwqe->context_un.axchg;
21295 sglq = ctxp->ctxbuf->sglq;
21296 if (pwqe->sli4_xritag == NO_XRI) {
21297 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21298 pwqe->sli4_xritag = sglq->sli4_xritag;
21299 }
21300 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21301 pwqe->sli4_xritag);
21302 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21303
21304 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21305 qp, wq_access);
21306 ret = lpfc_sli4_wq_put(wq, wqe);
21307 if (ret) {
21308 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21309 return ret;
21310 }
21311 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21312 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21313
21314 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21315 return 0;
21316 }
21317 return WQE_ERROR;
21318 }
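
/* Illustrative caller sketch (assumptions noted): a fast-path submitter
 * picks the hardware queue from its IO buffer and lets this routine take
 * the ring lock, post the WQE and track the request on the txcmplq.
 * "lpfc_cmd" is assumed to be a fully prepared struct lpfc_io_buf.
 *
 *	struct lpfc_iocbq *pwqe = &lpfc_cmd->cur_iocbq;
 *	int rc;
 *
 *	pwqe->cmd_flag |= LPFC_IO_FCP;
 *	rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, pwqe);
 *	if (rc == WQE_BUSY)
 *		;	// WQ full: caller backs off and retries later
 */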
21319
21320 /**
21321 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21322 * @phba: Pointer to HBA context object.
21323 * @cmdiocb: Pointer to driver command iocb object.
21324 * @cmpl: completion function.
21325 *
 * Fill in the appropriate fields for the abort WQE and call the
 * internal routine lpfc_sli4_issue_wqe to send the WQE.
 * This function is called with the hbalock held and no ring_lock held.
21329 *
21330 * RETURNS 0 - SUCCESS
21331 **/
21332
21333 int
lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    void *cmpl)
21336 {
21337 struct lpfc_vport *vport = cmdiocb->vport;
21338 struct lpfc_iocbq *abtsiocb = NULL;
21339 union lpfc_wqe128 *abtswqe;
21340 struct lpfc_io_buf *lpfc_cmd;
21341 int retval = IOCB_ERROR;
21342 u16 xritag = cmdiocb->sli4_xritag;
21343
	/*
	 * The SCSI command cannot be in the txq; it is in flight because
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
21349
21350 abtsiocb = __lpfc_sli_get_iocbq(phba);
21351 if (!abtsiocb)
21352 return WQE_NORESOURCE;
21353
21354 /* Indicate the IO is being aborted by the driver. */
21355 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21356
21357 abtswqe = &abtsiocb->wqe;
21358 memset(abtswqe, 0, sizeof(*abtswqe));
21359
21360 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21361 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21362 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21363 abtswqe->abort_cmd.rsrvd5 = 0;
21364 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21365 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21366 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21367 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21368 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21369 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21370 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21371
21372 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21373 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21374 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21375 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21376 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21377 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21378 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21379 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21380 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21381 abtsiocb->vport = vport;
21382 abtsiocb->cmd_cmpl = cmpl;
21383
21384 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21385 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21386
21387 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21388 "0359 Abort xri x%x, original iotag x%x, "
21389 "abort cmd iotag x%x retval x%x\n",
21390 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21391
21392 if (retval) {
21393 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21394 __lpfc_sli_release_iocbq(phba, abtsiocb);
21395 }
21396
21397 return retval;
21398 }
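
/* Illustrative caller sketch: abort an outstanding IO identified by its
 * command iocb, supplying a completion handler to reap the aborted
 * exchange. The handler name and locking shown are hypothetical; the
 * routine itself expects the hbalock to be held.
 *
 *	static void my_abort_cmpl(struct lpfc_hba *phba,
 *				  struct lpfc_iocbq *cmdiocb,
 *				  struct lpfc_iocbq *rspiocb)
 *	{
 *		lpfc_sli_release_iocbq(phba, cmdiocb);
 *	}
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli4_issue_abort_iotag(phba, cmdiocb, my_abort_cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */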
21399
21400 #ifdef LPFC_MXP_STAT
21401 /**
21402 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21403 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ the snapshot belongs to.
 *
 * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
 * counts 15 seconds after a test case starts running.
 *
 * The user should call lpfc_debugfs_multixripools_write before running a test
 * case to clear stat_snapshot_taken, then start the test case. While the test
 * case is running, stat_snapshot_taken is incremented by 1 every time this
 * routine is called from the heartbeat timer. When stat_snapshot_taken equals
 * LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21414 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21416 {
21417 struct lpfc_sli4_hdw_queue *qp;
21418 struct lpfc_multixri_pool *multixri_pool;
21419 struct lpfc_pvt_pool *pvt_pool;
21420 struct lpfc_pbl_pool *pbl_pool;
21421 u32 txcmplq_cnt;
21422
21423 qp = &phba->sli4_hba.hdwq[hwqid];
21424 multixri_pool = qp->p_multixri_pool;
21425 if (!multixri_pool)
21426 return;
21427
21428 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21429 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21430 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21431 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21432
21433 multixri_pool->stat_pbl_count = pbl_pool->count;
21434 multixri_pool->stat_pvt_count = pvt_pool->count;
21435 multixri_pool->stat_busy_count = txcmplq_cnt;
21436 }
21437
21438 multixri_pool->stat_snapshot_taken++;
21439 }
21440 #endif
21441
21442 /**
21443 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21444 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ to adjust.
21446 *
21447 * This routine moves some XRIs from private to public pool when private pool
21448 * is not busy.
21449 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21451 {
21452 struct lpfc_multixri_pool *multixri_pool;
21453 u32 io_req_count;
21454 u32 prev_io_req_count;
21455
21456 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21457 if (!multixri_pool)
21458 return;
21459 io_req_count = multixri_pool->io_req_count;
21460 prev_io_req_count = multixri_pool->prev_io_req_count;
21461
21462 if (prev_io_req_count != io_req_count) {
21463 /* Private pool is busy */
21464 multixri_pool->prev_io_req_count = io_req_count;
21465 } else {
21466 /* Private pool is not busy.
21467 * Move XRIs from private to public pool.
21468 */
21469 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21470 }
21471 }
21472
21473 /**
21474 * lpfc_adjust_high_watermark - Adjust high watermark
21475 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ to adjust.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * clamped between xri_limit/2 and xri_limit.
21480 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21482 {
21483 u32 new_watermark;
21484 u32 watermark_max;
21485 u32 watermark_min;
21486 u32 xri_limit;
21487 u32 txcmplq_cnt;
21488 u32 abts_io_bufs;
21489 struct lpfc_multixri_pool *multixri_pool;
21490 struct lpfc_sli4_hdw_queue *qp;
21491
21492 qp = &phba->sli4_hba.hdwq[hwqid];
21493 multixri_pool = qp->p_multixri_pool;
21494 if (!multixri_pool)
21495 return;
21496 xri_limit = multixri_pool->xri_limit;
21497
21498 watermark_max = xri_limit;
21499 watermark_min = xri_limit / 2;
21500
21501 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21502 abts_io_bufs = qp->abts_scsi_io_bufs;
21503 abts_io_bufs += qp->abts_nvme_io_bufs;
21504
21505 new_watermark = txcmplq_cnt + abts_io_bufs;
21506 new_watermark = min(watermark_max, new_watermark);
21507 new_watermark = max(watermark_min, new_watermark);
21508 multixri_pool->pvt_pool.high_watermark = new_watermark;
21509
21510 #ifdef LPFC_MXP_STAT
21511 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21512 new_watermark);
21513 #endif
21514 }
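
/* Worked example (illustrative numbers only): with xri_limit = 120,
 * txcmplq_cnt = 10 and abts_io_bufs = 2, the raw watermark is 12 and is
 * clamped up into [60, 120], so the private pool high watermark becomes 60.
 * With txcmplq_cnt = 200 it would instead be clamped down to 120.
 */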
21515
21516 /**
21517 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21518 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose pools are adjusted.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from the private to the public pool on hwqid in
 * two steps: the first step moves (all - low_watermark) XRIs and the
 * second step moves the rest.
21525 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21527 {
21528 struct lpfc_pbl_pool *pbl_pool;
21529 struct lpfc_pvt_pool *pvt_pool;
21530 struct lpfc_sli4_hdw_queue *qp;
21531 struct lpfc_io_buf *lpfc_ncmd;
21532 struct lpfc_io_buf *lpfc_ncmd_next;
21533 unsigned long iflag;
21534 struct list_head tmp_list;
21535 u32 tmp_count;
21536
21537 qp = &phba->sli4_hba.hdwq[hwqid];
21538 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21539 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21540 tmp_count = 0;
21541
21542 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21543 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21544
21545 if (pvt_pool->count > pvt_pool->low_watermark) {
21546 /* Step 1: move (all - low_watermark) from pvt_pool
21547 * to pbl_pool
21548 */
21549
21550 /* Move low watermark of bufs from pvt_pool to tmp_list */
21551 INIT_LIST_HEAD(&tmp_list);
21552 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21553 &pvt_pool->list, list) {
21554 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21555 tmp_count++;
21556 if (tmp_count >= pvt_pool->low_watermark)
21557 break;
21558 }
21559
21560 /* Move all bufs from pvt_pool to pbl_pool */
21561 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21562
21563 /* Move all bufs from tmp_list to pvt_pool */
21564 list_splice(&tmp_list, &pvt_pool->list);
21565
21566 pbl_pool->count += (pvt_pool->count - tmp_count);
21567 pvt_pool->count = tmp_count;
21568 } else {
21569 /* Step 2: move the rest from pvt_pool to pbl_pool */
21570 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21571 pbl_pool->count += pvt_pool->count;
21572 pvt_pool->count = 0;
21573 }
21574
21575 spin_unlock(&pvt_pool->lock);
21576 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21577 }
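
/* Worked example (illustrative numbers only): with pvt_pool->count = 100
 * and low_watermark = 20, the first invocation keeps 20 bufs in pvt_pool
 * and moves 80 to pbl_pool; if the pool stays idle, a later invocation
 * takes the else branch and moves the remaining 20.
 */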
21578
21579 /**
21580 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21581 * @phba: pointer to lpfc hba data structure
21582 * @qp: pointer to HDW queue
21583 * @pbl_pool: specified public free XRI pool
21584 * @pvt_pool: specified private free XRI pool
21585 * @count: number of XRIs to move
21586 *
 * This routine tries to move some free common bufs from the specified pbl_pool
 * to the specified pvt_pool. It might move fewer than @count XRIs if there are
 * not enough in the public pool.
21590 *
21591 * Return:
21592 * true - if XRIs are successfully moved from the specified pbl_pool to the
21593 * specified pvt_pool
21594 * false - if the specified pbl_pool is empty or locked by someone else
21595 **/
21596 static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
21600 {
21601 struct lpfc_io_buf *lpfc_ncmd;
21602 struct lpfc_io_buf *lpfc_ncmd_next;
21603 unsigned long iflag;
21604 int ret;
21605
21606 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21607 if (ret) {
21608 if (pbl_pool->count) {
21609 /* Move a batch of XRIs from public to private pool */
21610 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21611 list_for_each_entry_safe(lpfc_ncmd,
21612 lpfc_ncmd_next,
21613 &pbl_pool->list,
21614 list) {
21615 list_move_tail(&lpfc_ncmd->list,
21616 &pvt_pool->list);
21617 pvt_pool->count++;
21618 pbl_pool->count--;
21619 count--;
21620 if (count == 0)
21621 break;
21622 }
21623
21624 spin_unlock(&pvt_pool->lock);
21625 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21626 return true;
21627 }
21628 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21629 }
21630
21631 return false;
21632 }
21633
21634 /**
21635 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21636 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose private pool receives the XRIs.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public pools
 * using a round-robin method. The search always starts from the local hwqid,
 * then continues from the HWQ found last time (rrb_next_hwqid). Once a public
 * pool is found, a batch of free common bufs is moved to the private pool on
 * hwqid. It might move fewer than @count XRIs if there are not enough in the
 * public pool.
21645 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21647 {
21648 struct lpfc_multixri_pool *multixri_pool;
21649 struct lpfc_multixri_pool *next_multixri_pool;
21650 struct lpfc_pvt_pool *pvt_pool;
21651 struct lpfc_pbl_pool *pbl_pool;
21652 struct lpfc_sli4_hdw_queue *qp;
21653 u32 next_hwqid;
21654 u32 hwq_count;
21655 int ret;
21656
21657 qp = &phba->sli4_hba.hdwq[hwqid];
21658 multixri_pool = qp->p_multixri_pool;
21659 pvt_pool = &multixri_pool->pvt_pool;
21660 pbl_pool = &multixri_pool->pbl_pool;
21661
21662 /* Check if local pbl_pool is available */
21663 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21664 if (ret) {
21665 #ifdef LPFC_MXP_STAT
21666 multixri_pool->local_pbl_hit_count++;
21667 #endif
21668 return;
21669 }
21670
21671 hwq_count = phba->cfg_hdw_queue;
21672
21673 /* Get the next hwqid which was found last time */
21674 next_hwqid = multixri_pool->rrb_next_hwqid;
21675
21676 do {
21677 /* Go to next hwq */
21678 next_hwqid = (next_hwqid + 1) % hwq_count;
21679
21680 next_multixri_pool =
21681 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21682 pbl_pool = &next_multixri_pool->pbl_pool;
21683
21684 /* Check if the public free xri pool is available */
21685 ret = _lpfc_move_xri_pbl_to_pvt(
21686 phba, qp, pbl_pool, pvt_pool, count);
21687
21688 /* Exit while-loop if success or all hwqid are checked */
21689 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21690
21691 /* Starting point for the next time */
21692 multixri_pool->rrb_next_hwqid = next_hwqid;
21693
21694 if (!ret) {
		/* stats: all public pools are empty */
21696 multixri_pool->pbl_empty_count++;
21697 }
21698
21699 #ifdef LPFC_MXP_STAT
21700 if (ret) {
21701 if (next_hwqid == hwqid)
21702 multixri_pool->local_pbl_hit_count++;
21703 else
21704 multixri_pool->other_pbl_hit_count++;
21705 }
21706 #endif
21707 }
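
/* Worked example (illustrative numbers only): with cfg_hdw_queue = 4,
 * hwqid = 1 and rrb_next_hwqid = 1, the loop probes the public pools in
 * the order 2, 3, 0, 1 and stops at the first non-empty, unlocked one;
 * if none yields XRIs, wrapping back to rrb_next_hwqid ends the search
 * and pbl_empty_count is incremented.
 */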
21708
21709 /**
21710 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21711 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ to replenish.
 *
 * This routine gets a batch of XRIs from pbl_pool if the pvt_pool count is
 * below the low watermark.
21716 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21718 {
21719 struct lpfc_multixri_pool *multixri_pool;
21720 struct lpfc_pvt_pool *pvt_pool;
21721
21722 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21723 pvt_pool = &multixri_pool->pvt_pool;
21724
21725 if (pvt_pool->count < pvt_pool->low_watermark)
21726 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21727 }
21728
21729 /**
21730 * lpfc_release_io_buf - Return one IO buf back to free pool
21731 * @phba: pointer to lpfc hba data structure.
21732 * @lpfc_ncmd: IO buf to be returned.
 * @qp: pointer to the HDW queue the buffer belongs to.
 *
 * This routine returns one IO buf back to the free pool. If this is an urgent
 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
21740 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
21743 {
21744 unsigned long iflag;
21745 struct lpfc_pbl_pool *pbl_pool;
21746 struct lpfc_pvt_pool *pvt_pool;
21747 struct lpfc_epd_pool *epd_pool;
21748 u32 txcmplq_cnt;
21749 u32 xri_owned;
21750 u32 xri_limit;
21751 u32 abts_io_bufs;
21752
21753 /* MUST zero fields if buffer is reused by another protocol */
21754 lpfc_ncmd->nvmeCmd = NULL;
21755 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21756
21757 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21758 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21759 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21760
21761 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21762 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21763
21764 if (phba->cfg_xri_rebalancing) {
21765 if (lpfc_ncmd->expedite) {
21766 /* Return to expedite pool */
21767 epd_pool = &phba->epd_pool;
21768 spin_lock_irqsave(&epd_pool->lock, iflag);
21769 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21770 epd_pool->count++;
21771 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21772 return;
21773 }
21774
21775 /* Avoid invalid access if an IO sneaks in and is being rejected
21776 * just _after_ xri pools are destroyed in lpfc_offline.
21777 * Nothing much can be done at this point.
21778 */
21779 if (!qp->p_multixri_pool)
21780 return;
21781
21782 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21783 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21784
21785 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21786 abts_io_bufs = qp->abts_scsi_io_bufs;
21787 abts_io_bufs += qp->abts_nvme_io_bufs;
21788
21789 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21790 xri_limit = qp->p_multixri_pool->xri_limit;
21791
21792 #ifdef LPFC_MXP_STAT
21793 if (xri_owned <= xri_limit)
21794 qp->p_multixri_pool->below_limit_count++;
21795 else
21796 qp->p_multixri_pool->above_limit_count++;
21797 #endif
21798
21799 /* XRI goes to either public or private free xri pool
21800 * based on watermark and xri_limit
21801 */
21802 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21803 (xri_owned < xri_limit &&
21804 pvt_pool->count < pvt_pool->high_watermark)) {
21805 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21806 qp, free_pvt_pool);
21807 list_add_tail(&lpfc_ncmd->list,
21808 &pvt_pool->list);
21809 pvt_pool->count++;
21810 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21811 } else {
21812 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21813 qp, free_pub_pool);
21814 list_add_tail(&lpfc_ncmd->list,
21815 &pbl_pool->list);
21816 pbl_pool->count++;
21817 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21818 }
21819 } else {
21820 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21821 qp, free_xri);
21822 list_add_tail(&lpfc_ncmd->list,
21823 &qp->lpfc_io_buf_list_put);
21824 qp->put_io_bufs++;
21825 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21826 iflag);
21827 }
21828 }
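
/* Worked example (illustrative numbers only): with low_watermark = 20,
 * high_watermark = 60, xri_limit = 120 and pvt_pool->count = 30, a freed
 * XRI goes back to pvt_pool only while xri_owned (pvt count + txcmplq +
 * aborted bufs) stays below 120; otherwise it lands in pbl_pool where
 * other HWQs can borrow it.
 */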
21829
21830 /**
21831 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21832 * @phba: pointer to lpfc hba data structure.
21833 * @qp: pointer to HDW queue
21834 * @pvt_pool: pointer to private pool data structure.
21835 * @ndlp: pointer to lpfc nodelist data structure.
21836 *
21837 * This routine tries to get one free IO buf from private pool.
21838 *
21839 * Return:
21840 * pointer to one free IO buf - if private pool is not empty
21841 * NULL - if private pool is empty
21842 **/
21843 static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
21848 {
21849 struct lpfc_io_buf *lpfc_ncmd;
21850 struct lpfc_io_buf *lpfc_ncmd_next;
21851 unsigned long iflag;
21852
21853 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21854 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21855 &pvt_pool->list, list) {
21856 if (lpfc_test_rrq_active(
21857 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21858 continue;
21859 list_del(&lpfc_ncmd->list);
21860 pvt_pool->count--;
21861 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21862 return lpfc_ncmd;
21863 }
21864 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21865
21866 return NULL;
21867 }
21868
21869 /**
21870 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21871 * @phba: pointer to lpfc hba data structure.
21872 *
21873 * This routine tries to get one free IO buf from expedite pool.
21874 *
21875 * Return:
21876 * pointer to one free IO buf - if expedite pool is not empty
21877 * NULL - if expedite pool is empty
21878 **/
21879 static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21881 {
21882 struct lpfc_io_buf *lpfc_ncmd;
21883 struct lpfc_io_buf *lpfc_ncmd_next;
21884 unsigned long iflag;
21885 struct lpfc_epd_pool *epd_pool;
21886
21887 epd_pool = &phba->epd_pool;
21888 lpfc_ncmd = NULL;
21889
21890 spin_lock_irqsave(&epd_pool->lock, iflag);
21891 if (epd_pool->count > 0) {
21892 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21893 &epd_pool->list, list) {
21894 list_del(&lpfc_ncmd->list);
21895 epd_pool->count--;
21896 break;
21897 }
21898 }
21899 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21900
21901 return lpfc_ncmd;
21902 }
21903
21904 /**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21906 * @phba: pointer to lpfc hba data structure.
21907 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ to allocate from.
21909 * @expedite: 1 means this request is urgent.
21910 *
21911 * This routine will do the following actions and then return a pointer to
21912 * one free IO buf.
21913 *
21914 * 1. If private free xri count is empty, move some XRIs from public to
21915 * private pool.
21916 * 2. Get one XRI from private free xri pool.
21917 * 3. If we fail to get one from pvt_pool and this is an expedite request,
21918 * get one free xri from expedite pool.
21919 *
21920 * Note: ndlp is only used on SCSI side for RRQ testing.
21921 * The caller should pass NULL for ndlp on NVME side.
21922 *
21923 * Return:
21924 * pointer to one free IO buf - if private pool is not empty
21925 * NULL - if private pool is empty
21926 **/
21927 static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
21931 {
21932 struct lpfc_sli4_hdw_queue *qp;
21933 struct lpfc_multixri_pool *multixri_pool;
21934 struct lpfc_pvt_pool *pvt_pool;
21935 struct lpfc_io_buf *lpfc_ncmd;
21936
21937 qp = &phba->sli4_hba.hdwq[hwqid];
21938 lpfc_ncmd = NULL;
21939 if (!qp) {
21940 lpfc_printf_log(phba, KERN_INFO,
21941 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21942 "5556 NULL qp for hwqid x%x\n", hwqid);
21943 return lpfc_ncmd;
21944 }
21945 multixri_pool = qp->p_multixri_pool;
21946 if (!multixri_pool) {
21947 lpfc_printf_log(phba, KERN_INFO,
21948 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21949 "5557 NULL multixri for hwqid x%x\n", hwqid);
21950 return lpfc_ncmd;
21951 }
21952 pvt_pool = &multixri_pool->pvt_pool;
21953 if (!pvt_pool) {
21954 lpfc_printf_log(phba, KERN_INFO,
21955 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21956 "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
21957 return lpfc_ncmd;
21958 }
21959 multixri_pool->io_req_count++;
21960
21961 /* If pvt_pool is empty, move some XRIs from public to private pool */
21962 if (pvt_pool->count == 0)
21963 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21964
21965 /* Get one XRI from private free xri pool */
21966 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21967
21968 if (lpfc_ncmd) {
21969 lpfc_ncmd->hdwq = qp;
21970 lpfc_ncmd->hdwq_no = hwqid;
21971 } else if (expedite) {
21972 /* If we fail to get one from pvt_pool and this is an expedite
21973 * request, get one free xri from expedite pool.
21974 */
21975 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21976 }
21977
21978 return lpfc_ncmd;
21979 }
21980
21981 static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21983 {
21984 struct lpfc_sli4_hdw_queue *qp;
21985 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21986
21987 qp = &phba->sli4_hba.hdwq[idx];
21988 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21989 &qp->lpfc_io_buf_list_get, list) {
21990 if (lpfc_test_rrq_active(phba, ndlp,
21991 lpfc_cmd->cur_iocbq.sli4_lxritag))
21992 continue;
21993
21994 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21995 continue;
21996
21997 list_del_init(&lpfc_cmd->list);
21998 qp->get_io_bufs--;
21999 lpfc_cmd->hdwq = qp;
22000 lpfc_cmd->hdwq_no = idx;
22001 return lpfc_cmd;
22002 }
22003 return NULL;
22004 }
22005
22006 /**
22007 * lpfc_get_io_buf - Get one IO buffer from free pool
22008 * @phba: The HBA for which this call is being executed.
22009 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ to allocate from.
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If
 * cfg_xri_rebalancing==1, it removes an IO buffer from the multiXRI pools.
 * If cfg_xri_rebalancing==0, it removes an IO buffer from the head of the
 * @hwqid io_buf_list and returns it to the caller.
22016 *
22017 * Note: ndlp is only used on SCSI side for RRQ testing.
22018 * The caller should pass NULL for ndlp on NVME side.
22019 *
22020 * Return codes:
22021 * NULL - Error
22022 * Pointer to lpfc_io_buf - Success
22023 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
22027 {
22028 struct lpfc_sli4_hdw_queue *qp;
22029 unsigned long iflag;
22030 struct lpfc_io_buf *lpfc_cmd;
22031
22032 qp = &phba->sli4_hba.hdwq[hwqid];
22033 lpfc_cmd = NULL;
22034 if (!qp) {
22035 lpfc_printf_log(phba, KERN_WARNING,
22036 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22037 "5555 NULL qp for hwqid x%x\n", hwqid);
22038 return lpfc_cmd;
22039 }
22040
22041 if (phba->cfg_xri_rebalancing)
22042 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22043 phba, ndlp, hwqid, expedite);
22044 else {
22045 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22046 qp, alloc_xri_get);
22047 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22048 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22049 if (!lpfc_cmd) {
22050 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22051 qp, alloc_xri_put);
22052 list_splice(&qp->lpfc_io_buf_list_put,
22053 &qp->lpfc_io_buf_list_get);
22054 qp->get_io_bufs += qp->put_io_bufs;
22055 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22056 qp->put_io_bufs = 0;
22057 spin_unlock(&qp->io_buf_list_put_lock);
22058 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22059 expedite)
22060 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22061 }
22062 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22063 }
22064
22065 return lpfc_cmd;
22066 }
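
/* Illustrative pairing sketch: every buffer taken with lpfc_get_io_buf()
 * is handed back through lpfc_release_io_buf() on the same HWQ once the
 * IO completes. The hwqid selection is assumed to come from the
 * submitting CPU.
 *
 *	struct lpfc_io_buf *iobuf;
 *
 *	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!iobuf)
 *		return -EBUSY;	// pools exhausted, caller backs off
 *	// ... build and issue the WQE ...
 *	lpfc_release_io_buf(phba, iobuf, iobuf->hdwq);
 */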
22067
22068 /**
22069 * lpfc_read_object - Retrieve object data from HBA
22070 * @phba: The HBA for which this call is being executed.
22071 * @rdobject: Pathname of object data we want to read.
22072 * @datap: Pointer to where data will be copied to.
22073 * @datasz: size of data area
22074 *
22075 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22076 * The data will be truncated if datasz is not large enough.
22077 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22078 * Returns the actual bytes read from the object.
22079 */
22080 int
lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
		 uint32_t datasz)
22083 {
22084 struct lpfc_mbx_read_object *read_object;
22085 LPFC_MBOXQ_t *mbox;
22086 int rc, length, eof, j, byte_cnt = 0;
22087 uint32_t shdr_status, shdr_add_status;
22088 union lpfc_sli4_cfg_shdr *shdr;
22089 struct lpfc_dmabuf *pcmd;
22090 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22091
	/* sanity check on input parameters */
22093 if (!datap)
22094 return -ENODEV;
22095
22096 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22097 if (!mbox)
22098 return -ENOMEM;
22099 length = (sizeof(struct lpfc_mbx_read_object) -
22100 sizeof(struct lpfc_sli4_cfg_mhdr));
22101 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22102 LPFC_MBOX_OPCODE_READ_OBJECT,
22103 length, LPFC_SLI4_MBX_EMBED);
22104 read_object = &mbox->u.mqe.un.read_object;
22105 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22106
22107 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22108 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22109 read_object->u.request.rd_object_offset = 0;
22110 read_object->u.request.rd_object_cnt = 1;
22111
22112 memset((void *)read_object->u.request.rd_object_name, 0,
22113 LPFC_OBJ_NAME_SZ);
	scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s",
		  rdobject);
22115 for (j = 0; j < strlen(rdobject); j++)
22116 read_object->u.request.rd_object_name[j] =
22117 cpu_to_le32(rd_object_name[j]);
22118
22119 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22120 if (pcmd)
22121 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22122 if (!pcmd || !pcmd->virt) {
22123 kfree(pcmd);
22124 mempool_free(mbox, phba->mbox_mem_pool);
22125 return -ENOMEM;
22126 }
22127 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22128 read_object->u.request.rd_object_hbuf[0].pa_lo =
22129 putPaddrLow(pcmd->phys);
22130 read_object->u.request.rd_object_hbuf[0].pa_hi =
22131 putPaddrHigh(pcmd->phys);
22132 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22133
22134 mbox->vport = phba->pport;
22135 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22136 mbox->ctx_ndlp = NULL;
22137
22138 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22139 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22140 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22141
22142 if (shdr_status == STATUS_FAILED &&
22143 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22144 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22145 "4674 No port cfg file in FW.\n");
22146 byte_cnt = -ENOENT;
22147 } else if (shdr_status || shdr_add_status || rc) {
22148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22149 "2625 READ_OBJECT mailbox failed with "
22150 "status x%x add_status x%x, mbx status x%x\n",
22151 shdr_status, shdr_add_status, rc);
22152 byte_cnt = -ENXIO;
22153 } else {
22154 /* Success */
22155 length = read_object->u.response.rd_object_actual_rlen;
22156 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22157 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22158 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22159 length, datasz, eof);
22160
		/* Detect that the port config file exists but is empty */
22162 if (!length && eof) {
22163 byte_cnt = 0;
22164 goto exit;
22165 }
22166
22167 byte_cnt = length;
22168 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22169 }
22170
22171 exit:
22172 /* This is an embedded SLI4 mailbox with an external buffer allocated.
22173 * Free the pcmd and then cleanup with the correct routine.
22174 */
22175 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22176 kfree(pcmd);
22177 lpfc_sli4_mbox_cmd_free(phba, mbox);
22178 return byte_cnt;
22179 }
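
/* Illustrative caller sketch: read a named flash object into a local
 * buffer. The object path below is hypothetical; real callers pass a
 * firmware-defined object name.
 *
 *	u32 data[LPFC_BPL_SIZE / sizeof(u32)];
 *	int len;
 *
 *	len = lpfc_read_object(phba, "/some/object/name", data, sizeof(data));
 *	if (len == -ENOENT)
 *		;	// object does not exist in FW
 *	else if (len >= 0)
 *		;	// len bytes were copied into data
 */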
22180
22181 /**
22182 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22183 * @phba: The HBA for which this call is being executed.
22184 * @lpfc_buf: IO buf structure to append the SGL chunk
22185 *
22186 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22187 * and will allocate an SGL chunk if the pool is empty.
22188 *
22189 * Return codes:
22190 * NULL - Error
22191 * Pointer to sli4_hybrid_sgl - Success
22192 **/
22193 struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22195 {
22196 struct sli4_hybrid_sgl *list_entry = NULL;
22197 struct sli4_hybrid_sgl *tmp = NULL;
22198 struct sli4_hybrid_sgl *allocated_sgl = NULL;
22199 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22200 struct list_head *buf_list = &hdwq->sgl_list;
22201 unsigned long iflags;
22202
22203 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22204
22205 if (likely(!list_empty(buf_list))) {
22206 /* break off 1 chunk from the sgl_list */
22207 list_for_each_entry_safe(list_entry, tmp,
22208 buf_list, list_node) {
22209 list_move_tail(&list_entry->list_node,
22210 &lpfc_buf->dma_sgl_xtra_list);
22211 break;
22212 }
22213 } else {
22214 /* allocate more */
22215 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22216 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22217 cpu_to_node(hdwq->io_wq->chann));
22218 if (!tmp) {
22219 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22220 "8353 error kmalloc memory for HDWQ "
22221 "%d %s\n",
22222 lpfc_buf->hdwq_no, __func__);
22223 return NULL;
22224 }
22225
22226 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22227 GFP_ATOMIC, &tmp->dma_phys_sgl);
22228 if (!tmp->dma_sgl) {
22229 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22230 "8354 error pool_alloc memory for HDWQ "
22231 "%d %s\n",
22232 lpfc_buf->hdwq_no, __func__);
22233 kfree(tmp);
22234 return NULL;
22235 }
22236
22237 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22238 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22239 }
22240
22241 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22242 struct sli4_hybrid_sgl,
22243 list_node);
22244
22245 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22246
22247 return allocated_sgl;
22248 }
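
/* Illustrative pairing sketch: a chunk borrowed for an oversized scatter
 * list is tracked on the IO buf's dma_sgl_xtra_list and handed back to
 * the per-HWQ pool when the IO completes.
 *
 *	struct sli4_hybrid_sgl *sgl_xtra;
 *
 *	sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
 *	if (!sgl_xtra)
 *		return -ENOMEM;	// pool empty and GFP_ATOMIC alloc failed
 *	// ... chain sgl_xtra->dma_sgl into the request's SGL ...
 *	lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);
 */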
22249
22250 /**
22251 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22252 * @phba: The HBA for which this call is being executed.
22253 * @lpfc_buf: IO buf structure with the SGL chunk
22254 *
22255 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22256 *
22257 * Return codes:
22258 * 0 - Success
22259 * -EINVAL - Error
22260 **/
22261 int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22263 {
22264 int rc = 0;
22265 struct sli4_hybrid_sgl *list_entry = NULL;
22266 struct sli4_hybrid_sgl *tmp = NULL;
22267 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22268 struct list_head *buf_list = &hdwq->sgl_list;
22269 unsigned long iflags;
22270
22271 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22272
22273 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22274 list_for_each_entry_safe(list_entry, tmp,
22275 &lpfc_buf->dma_sgl_xtra_list,
22276 list_node) {
22277 list_move_tail(&list_entry->list_node,
22278 buf_list);
22279 }
22280 } else {
22281 rc = -EINVAL;
22282 }
22283
22284 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22285 return rc;
22286 }
22287
22288 /**
22289 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22290 * @phba: phba object
 * @hdwq: hdwq to clean up SGL buffer resources on
22292 *
22293 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22294 *
22295 * Return codes:
22296 * None
22297 **/
22298 void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
22301 {
22302 struct list_head *buf_list = &hdwq->sgl_list;
22303 struct sli4_hybrid_sgl *list_entry = NULL;
22304 struct sli4_hybrid_sgl *tmp = NULL;
22305 unsigned long iflags;
22306
22307 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22308
22309 /* Free sgl pool */
22310 list_for_each_entry_safe(list_entry, tmp,
22311 buf_list, list_node) {
22312 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22313 list_entry->dma_sgl,
22314 list_entry->dma_phys_sgl);
22315 list_del(&list_entry->list_node);
22316 kfree(list_entry);
22317 }
22318
22319 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22320 }
22321
22322 /**
22323 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22324 * @phba: The HBA for which this call is being executed.
22325 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22326 *
 * This routine gets one CMD/RSP buffer from the hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
22329 *
22330 * Return codes:
22331 * NULL - Error
22332 * Pointer to fcp_cmd_rsp_buf - Success
22333 **/
22334 struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
22337 {
22338 struct fcp_cmd_rsp_buf *list_entry = NULL;
22339 struct fcp_cmd_rsp_buf *tmp = NULL;
22340 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22341 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22342 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22343 unsigned long iflags;
22344
22345 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22346
22347 if (likely(!list_empty(buf_list))) {
22348 /* break off 1 chunk from the list */
22349 list_for_each_entry_safe(list_entry, tmp,
22350 buf_list,
22351 list_node) {
22352 list_move_tail(&list_entry->list_node,
22353 &lpfc_buf->dma_cmd_rsp_list);
22354 break;
22355 }
22356 } else {
22357 /* allocate more */
22358 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22359 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22360 cpu_to_node(hdwq->io_wq->chann));
22361 if (!tmp) {
22362 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22363 "8355 error kmalloc memory for HDWQ "
22364 "%d %s\n",
22365 lpfc_buf->hdwq_no, __func__);
22366 return NULL;
22367 }
22368
22369 tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22370 GFP_ATOMIC,
22371 &tmp->fcp_cmd_rsp_dma_handle);
22372
22373 if (!tmp->fcp_cmnd) {
22374 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22375 "8356 error pool_alloc memory for HDWQ "
22376 "%d %s\n",
22377 lpfc_buf->hdwq_no, __func__);
22378 kfree(tmp);
22379 return NULL;
22380 }
22381
22382 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22383 sizeof(struct fcp_cmnd));
22384
22385 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22386 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22387 }
22388
22389 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22390 struct fcp_cmd_rsp_buf,
22391 list_node);
22392
22393 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22394
22395 return allocated_buf;
22396 }
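
/* Layout note (derived from the routine above): fcp_cmnd and fcp_rsp share
 * a single DMA allocation from lpfc_cmd_rsp_buf_pool; fcp_rsp simply starts
 * sizeof(struct fcp_cmnd) bytes into the same buffer, so freeing fcp_cmnd
 * back to the pool releases both.
 */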
22397
22398 /**
22399 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22400 * @phba: The HBA for which this call is being executed.
22401 * @lpfc_buf: IO buf structure with the CMD/RSP buf
22402 *
 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
22404 *
22405 * Return codes:
22406 * 0 - Success
22407 * -EINVAL - Error
22408 **/
22409 int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
22412 {
22413 int rc = 0;
22414 struct fcp_cmd_rsp_buf *list_entry = NULL;
22415 struct fcp_cmd_rsp_buf *tmp = NULL;
22416 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22417 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22418 unsigned long iflags;
22419
22420 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22421
	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq whose CMD/RSP buffer resources are to be cleaned up
 *
 * This routine frees all CMD/RSP buffers in the hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/*
	 * Free the cmd_rsp buf pool: return each entry's DMA chunk to the
	 * DMA pool, then free the tracking structure itself.
	 */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
 * @phba: phba object
 * @job: job entry of the command to be posted.
 *
 * Fill in the common fields of the WQE for the command to be posted.
 *
 * Return codes:
 *   None
 **/
void
lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
	u8 cmnd;
	u32 *pcmd;
	u32 if_type = 0;
	u32 fip, abort_tag;
	struct lpfc_nodelist *ndlp = NULL;
	union lpfc_wqe128 *wqe = &job->wqe;
	u8 command_type = ELS_COMMAND_NON_FIP;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (job->cmd_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

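	/* The WQE command opcode was set by the caller; dispatch on it. */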
	abort_tag = job->iotag;
	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);

	switch (cmnd) {
	case CMD_ELS_REQUEST64_WQE:
		ndlp = job->ndlp;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			pcmd = (u32 *)job->cmd_dmabuf->virt;
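			/*
			 * These ELS commands must carry the source ID
			 * explicitly in the WQE (els_req64_sp/sid).
			 */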
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				     *pcmd == ELS_CMD_SCR ||
				     *pcmd == ELS_CMD_RDF ||
				     *pcmd == ELS_CMD_EDC ||
				     *pcmd == ELS_CMD_RSCN_XMT ||
				     *pcmd == ELS_CMD_FDISC ||
				     *pcmd == ELS_CMD_LOGO ||
				     *pcmd == ELS_CMD_QFPA ||
				     *pcmd == ELS_CMD_UVEM ||
				     *pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
				       job->vport->fc_myDID);

				if (*pcmd == ELS_CMD_FLOGI &&
				    phba->fc_topology != LPFC_TOPOLOGY_LOOP)
					bf_set(els_req64_sid, &wqe->els_req, 0);

				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
			} else if (pcmd) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}

		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_ELS_RSP64_WQE:
		ndlp = job->ndlp;

		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;

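		/* In pt2pt mode the response must carry our S_ID explicitly. */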
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (job->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				       job->vport->fc_myDID);
				if (job->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
					       &wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}

		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
			       job->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_GEN_REQUEST64_WQE:
		/* Word 10 */
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_SEQUENCE64_WQE:
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

		wqe->xmit_sequence.rsvd3 = 0;
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_WQE:
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
	case CMD_ABORT_XRI_WQE:		/* abort iotag */
	case CMD_SEND_FRAME:		/* mds loopback */
		/* cases already formatted for sli4 wqe - no chgs necessary */
		return;
	default:
		dump_stack();
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6207 Invalid command 0x%x\n",
				cmnd);
		break;
	}

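	/* Fields common to every WQE type prepared above. */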
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}

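/*
 * Usage sketch (illustrative only, not lifted from a particular caller):
 * a submit path sets the command-specific WQE words first, then lets
 * lpfc_sli_prep_wqe() fill the common fields before the WQE is posted:
 *
 *	bf_set(wqe_cmnd, &iocbq->wqe.els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
 *	(set the remaining command-specific words for the request)
 *	lpfc_sli_prep_wqe(phba, iocbq);
 *	(hand the iocbq to the work-queue submission routine)
 */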