1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
19
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24 sts_entry_t *);
25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 struct purex_item *item);
27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
28 uint16_t size);
29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
30 void *pkt);
31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 void **pkt, struct rsp_que **rsp);
33
34 static void
35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
36 {
37 void *pkt = &item->iocb;
38 uint16_t pkt_size = item->size;
39
40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 "%s: Enter\n", __func__);
42
43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 "-------- ELS REQ -------\n");
45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
46 pkt, pkt_size);
47
48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
49 }
50
51 const char *const port_state_str[] = {
52 [FCS_UNKNOWN] = "Unknown",
53 [FCS_UNCONFIGURED] = "UNCONFIGURED",
54 [FCS_DEVICE_DEAD] = "DEAD",
55 [FCS_DEVICE_LOST] = "LOST",
56 [FCS_ONLINE] = "ONLINE"
57 };
58
59 static void
60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
61 {
62 struct abts_entry_24xx *abts =
63 (struct abts_entry_24xx *)&pkt->iocb;
64 struct qla_hw_data *ha = vha->hw;
65 struct els_entry_24xx *rsp_els;
66 struct abts_entry_24xx *abts_rsp;
67 dma_addr_t dma;
68 uint32_t fctl;
69 int rval;
70
71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
72
73 ql_log(ql_log_warn, vha, 0x0287,
74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
76 abts->seq_id, abts->seq_cnt);
77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
78 "-------- ABTS RCV -------\n");
79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
80 (uint8_t *)abts, sizeof(*abts));
81
82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
83 GFP_KERNEL);
84 if (!rsp_els) {
85 ql_log(ql_log_warn, vha, 0x0287,
86 "Failed to allocate DMA buffer for ABTS/ELS RSP.\n");
87 return;
88 }
89
90 /* terminate exchange */
91 rsp_els->entry_type = ELS_IOCB_TYPE;
92 rsp_els->entry_count = 1;
93 rsp_els->nport_handle = cpu_to_le16(~0);
94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
96 ql_dbg(ql_dbg_init, vha, 0x0283,
97 "Sending ELS Response to terminate exchange %#x...\n",
98 abts->rx_xch_addr_to_abort);
99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
100 "-------- ELS RSP -------\n");
101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
102 (uint8_t *)rsp_els, sizeof(*rsp_els));
103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
104 if (rval) {
105 ql_log(ql_log_warn, vha, 0x0288,
106 "%s: iocb failed to execute -> %x\n", __func__, rval);
107 } else if (rsp_els->comp_status) {
108 ql_log(ql_log_warn, vha, 0x0289,
109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
110 __func__, rsp_els->comp_status,
111 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
112 } else {
113 ql_dbg(ql_dbg_init, vha, 0x028a,
114 "%s: abort exchange done.\n", __func__);
115 }
116
117 /* send ABTS response */
118 abts_rsp = (void *)rsp_els;
119 memset(abts_rsp, 0, sizeof(*abts_rsp));
120 abts_rsp->entry_type = ABTS_RSP_TYPE;
121 abts_rsp->entry_count = 1;
122 abts_rsp->nport_handle = abts->nport_handle;
123 abts_rsp->vp_idx = abts->vp_idx;
124 abts_rsp->sof_type = abts->sof_type & 0xf0;
125 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
126 abts_rsp->d_id[0] = abts->s_id[0];
127 abts_rsp->d_id[1] = abts->s_id[1];
128 abts_rsp->d_id[2] = abts->s_id[2];
129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
130 abts_rsp->s_id[0] = abts->d_id[0];
131 abts_rsp->s_id[1] = abts->d_id[1];
132 abts_rsp->s_id[2] = abts->d_id[2];
133 abts_rsp->cs_ctl = abts->cs_ctl;
134 /* include flipping bit23 in fctl */
135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
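	/*
	 * ~(f_ctl[2] | 0x7F) << 16 zeroes bits 16-22 and flips bit 23 of the
	 * received F_CTL (the Exchange Context bit), while the
	 * LAST_SEQ/END_SEQ/SEQ_INIT flags are OR'd into the low bytes.
	 */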
137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
140 abts_rsp->type = FC_TYPE_BLD;
141 abts_rsp->rx_id = abts->rx_id;
142 abts_rsp->ox_id = abts->ox_id;
143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
147 ql_dbg(ql_dbg_init, vha, 0x028b,
148 "Sending BA ACC response to ABTS %#x...\n",
149 abts->rx_xch_addr_to_abort);
150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
151 "-------- ELS RSP -------\n");
152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
153 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
155 if (rval) {
156 ql_log(ql_log_warn, vha, 0x028c,
157 "%s: iocb failed to execute -> %x\n", __func__, rval);
158 } else if (abts_rsp->comp_status) {
159 ql_log(ql_log_warn, vha, 0x028d,
160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
161 __func__, abts_rsp->comp_status,
162 abts_rsp->payload.error.subcode1,
163 abts_rsp->payload.error.subcode2);
164 } else {
165 ql_dbg(ql_dbg_init, vha, 0x028ea,
166 "%s: done.\n", __func__);
167 }
168
169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
170 }
171
172 /**
173 * __qla_consume_iocb - this routine tells the firmware that the driver has
174 * processed or consumed the head IOCB along with its continuation IOCBs
175 * from the provided response queue.
176 * @vha: host adapter pointer
177 * @pkt: pointer to current packet.  On return, this pointer shall move
178 * to the next packet.
179 * @rsp: response queue pointer.
180 *
181 * It is assumed @pkt is the head IOCB, not a continuation IOCB.
182 */
183 void __qla_consume_iocb(struct scsi_qla_host *vha,
184 void **pkt, struct rsp_que **rsp)
185 {
186 struct rsp_que *rsp_q = *rsp;
187 response_t *new_pkt;
188 uint16_t entry_count_remaining;
189 struct purex_entry_24xx *purex = *pkt;
190
191 entry_count_remaining = purex->entry_count;
192 while (entry_count_remaining > 0) {
193 new_pkt = rsp_q->ring_ptr;
194 *pkt = new_pkt;
195
196 rsp_q->ring_index++;
197 if (rsp_q->ring_index == rsp_q->length) {
198 rsp_q->ring_index = 0;
199 rsp_q->ring_ptr = rsp_q->ring;
200 } else {
201 rsp_q->ring_ptr++;
202 }
203
204 new_pkt->signature = RESPONSE_PROCESSED;
205 /* flush signature */
206 wmb();
207 --entry_count_remaining;
208 }
209 }
210
211 /**
212 * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
213 * and save to provided buffer
214 * @vha: host adapter pointer
215 * @pkt: pointer to Purex IOCB
216 * @rsp: response queue
217 * @buf: buffer to copy the extracted ELS payload into
218 * @buf_len: buffer length
219 */
220 int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
221 void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
222 {
223 struct purex_entry_24xx *purex = *pkt;
224 struct rsp_que *rsp_q = *rsp;
225 sts_cont_entry_t *new_pkt;
226 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
227 uint16_t buffer_copy_offset = 0;
228 uint16_t entry_count_remaining;
229 u16 tpad;
230
231 entry_count_remaining = purex->entry_count;
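	/*
	 * The low 12 bits of frame_size hold the received frame length;
	 * dropping the ELS header gives the payload size.
	 */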
232 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
233 - PURX_ELS_HEADER_SIZE;
234
235 /*
236 * The payload may not end on a 4-byte boundary.  Round up / pad (e.g. 94
237 * bytes -> 96) so there is room to byte-swap before saving the data.
238 */
239 tpad = roundup(total_bytes, 4);
240
241 if (buf_len < tpad) {
242 ql_dbg(ql_dbg_async, vha, 0x5084,
243 "%s buffer is too small %d < %d\n",
244 __func__, buf_len, tpad);
245 __qla_consume_iocb(vha, pkt, rsp);
246 return -EIO;
247 }
248
249 pending_bytes = total_bytes = tpad;
250 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
251 sizeof(purex->els_frame_payload) : pending_bytes;
252
253 memcpy(buf, &purex->els_frame_payload[0], no_bytes);
254 buffer_copy_offset += no_bytes;
255 pending_bytes -= no_bytes;
256 --entry_count_remaining;
257
258 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
259 /* flush signature */
260 wmb();
261
262 do {
263 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
264 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
265 *pkt = new_pkt;
266
267 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
268 ql_log(ql_log_warn, vha, 0x507a,
269 "Unexpected IOCB type, partial data 0x%x\n",
270 buffer_copy_offset);
271 break;
272 }
273
274 rsp_q->ring_index++;
275 if (rsp_q->ring_index == rsp_q->length) {
276 rsp_q->ring_index = 0;
277 rsp_q->ring_ptr = rsp_q->ring;
278 } else {
279 rsp_q->ring_ptr++;
280 }
281 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
282 sizeof(new_pkt->data) : pending_bytes;
283 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
284 memcpy((buf + buffer_copy_offset), new_pkt->data,
285 no_bytes);
286 buffer_copy_offset += no_bytes;
287 pending_bytes -= no_bytes;
288 --entry_count_remaining;
289 } else {
290 ql_log(ql_log_warn, vha, 0x5044,
291 "Attempt to copy more than we got, optimizing..%x\n",
292 buffer_copy_offset);
293 memcpy((buf + buffer_copy_offset), new_pkt->data,
294 total_bytes - buffer_copy_offset);
295 }
296
297 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
298 /* flush signature */
299 wmb();
300 }
301
302 if (pending_bytes != 0 || entry_count_remaining != 0) {
303 ql_log(ql_log_fatal, vha, 0x508b,
304 "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
305 total_bytes, entry_count_remaining);
306 return -EIO;
307 }
308 } while (entry_count_remaining > 0);
309
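	/* The ELS payload arrives big-endian; convert it to CPU order in place. */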
310 be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
311
312 return 0;
313 }
314
315 /**
316 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
317 * @irq: interrupt number
318 * @dev_id: SCSI driver HA context
319 *
320 * Called by system whenever the host adapter generates an interrupt.
321 *
322 * Returns handled flag.
323 */
324 irqreturn_t
325 qla2100_intr_handler(int irq, void *dev_id)
326 {
327 scsi_qla_host_t *vha;
328 struct qla_hw_data *ha;
329 struct device_reg_2xxx __iomem *reg;
330 int status;
331 unsigned long iter;
332 uint16_t hccr;
333 uint16_t mb[8];
334 struct rsp_que *rsp;
335 unsigned long flags;
336
337 rsp = (struct rsp_que *) dev_id;
338 if (!rsp) {
339 ql_log(ql_log_info, NULL, 0x505d,
340 "%s: NULL response queue pointer.\n", __func__);
341 return (IRQ_NONE);
342 }
343
344 ha = rsp->hw;
345 reg = &ha->iobase->isp;
346 status = 0;
347
348 spin_lock_irqsave(&ha->hardware_lock, flags);
349 vha = pci_get_drvdata(ha->pdev);
350 for (iter = 50; iter--; ) {
351 hccr = rd_reg_word(&reg->hccr);
352 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
353 break;
354 if (hccr & HCCR_RISC_PAUSE) {
355 if (pci_channel_offline(ha->pdev))
356 break;
357
358 /*
359 * Issue a "HARD" reset in order for the RISC interrupt
360 * bit to be cleared. Schedule a big hammer to get
361 * out of the RISC PAUSED state.
362 */
363 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
364 rd_reg_word(&reg->hccr);
365
366 ha->isp_ops->fw_dump(vha);
367 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break;
369 } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
370 break;
371
372 if (rd_reg_word(&reg->semaphore) & BIT_0) {
373 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
374 rd_reg_word(&reg->hccr);
375
376 /* Get mailbox data. */
377 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
378 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
379 qla2x00_mbx_completion(vha, mb[0]);
380 status |= MBX_INTERRUPT;
381 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
382 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
383 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
384 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
385 qla2x00_async_event(vha, rsp, mb);
386 } else {
387 /*EMPTY*/
388 ql_dbg(ql_dbg_async, vha, 0x5025,
389 "Unrecognized interrupt type (%d).\n",
390 mb[0]);
391 }
392 /* Release mailbox registers. */
393 wrt_reg_word(&reg->semaphore, 0);
394 rd_reg_word(&reg->semaphore);
395 } else {
396 qla2x00_process_response_queue(rsp);
397
398 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
399 rd_reg_word(&reg->hccr);
400 }
401 }
402 qla2x00_handle_mbx_completion(ha, status);
403 spin_unlock_irqrestore(&ha->hardware_lock, flags);
404
405 return (IRQ_HANDLED);
406 }
407
408 bool
409 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
410 {
411 /* Check for PCI disconnection */
412 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
413 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
414 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
415 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
416 qla_schedule_eeh_work(vha);
417 }
418 return true;
419 } else
420 return false;
421 }
422
423 bool
424 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
425 {
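	/* Widen the 16-bit value so an all-ones read maps to 0xffffffff for the disconnect check. */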
426 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
427 }
428
429 /**
430 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
431 * @irq: interrupt number
432 * @dev_id: SCSI driver HA context
433 *
434 * Called by system whenever the host adapter generates an interrupt.
435 *
436 * Returns handled flag.
437 */
438 irqreturn_t
439 qla2300_intr_handler(int irq, void *dev_id)
440 {
441 scsi_qla_host_t *vha;
442 struct device_reg_2xxx __iomem *reg;
443 int status;
444 unsigned long iter;
445 uint32_t stat;
446 uint16_t hccr;
447 uint16_t mb[8];
448 struct rsp_que *rsp;
449 struct qla_hw_data *ha;
450 unsigned long flags;
451
452 rsp = (struct rsp_que *) dev_id;
453 if (!rsp) {
454 ql_log(ql_log_info, NULL, 0x5058,
455 "%s: NULL response queue pointer.\n", __func__);
456 return (IRQ_NONE);
457 }
458
459 ha = rsp->hw;
460 reg = &ha->iobase->isp;
461 status = 0;
462
463 spin_lock_irqsave(&ha->hardware_lock, flags);
464 vha = pci_get_drvdata(ha->pdev);
465 for (iter = 50; iter--; ) {
466 stat = rd_reg_dword(&reg->u.isp2300.host_status);
467 if (qla2x00_check_reg32_for_disconnect(vha, stat))
468 break;
469 if (stat & HSR_RISC_PAUSED) {
470 if (unlikely(pci_channel_offline(ha->pdev)))
471 break;
472
473 hccr = rd_reg_word(&reg->hccr);
474
475 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
476 ql_log(ql_log_warn, vha, 0x5026,
477 "Parity error -- HCCR=%x, Dumping "
478 "firmware.\n", hccr);
479 else
480 ql_log(ql_log_warn, vha, 0x5027,
481 "RISC paused -- HCCR=%x, Dumping "
482 "firmware.\n", hccr);
483
484 /*
485 * Issue a "HARD" reset in order for the RISC
486 * interrupt bit to be cleared. Schedule a big
487 * hammer to get out of the RISC PAUSED state.
488 */
489 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
490 rd_reg_word(&reg->hccr);
491
492 ha->isp_ops->fw_dump(vha);
493 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
494 break;
495 } else if ((stat & HSR_RISC_INT) == 0)
496 break;
497
498 switch (stat & 0xff) {
499 case 0x1:
500 case 0x2:
501 case 0x10:
502 case 0x11:
503 qla2x00_mbx_completion(vha, MSW(stat));
504 status |= MBX_INTERRUPT;
505
506 /* Release mailbox registers. */
507 wrt_reg_word(&reg->semaphore, 0);
508 break;
509 case 0x12:
510 mb[0] = MSW(stat);
511 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
512 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
513 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
514 qla2x00_async_event(vha, rsp, mb);
515 break;
516 case 0x13:
517 qla2x00_process_response_queue(rsp);
518 break;
519 case 0x15:
520 mb[0] = MBA_CMPLT_1_16BIT;
521 mb[1] = MSW(stat);
522 qla2x00_async_event(vha, rsp, mb);
523 break;
524 case 0x16:
525 mb[0] = MBA_SCSI_COMPLETION;
526 mb[1] = MSW(stat);
527 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
528 qla2x00_async_event(vha, rsp, mb);
529 break;
530 default:
531 ql_dbg(ql_dbg_async, vha, 0x5028,
532 "Unrecognized interrupt type (%d).\n", stat & 0xff);
533 break;
534 }
535 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
536 rd_reg_word_relaxed(&reg->hccr);
537 }
538 qla2x00_handle_mbx_completion(ha, status);
539 spin_unlock_irqrestore(&ha->hardware_lock, flags);
540
541 return (IRQ_HANDLED);
542 }
543
544 /**
545 * qla2x00_mbx_completion() - Process mailbox command completions.
546 * @vha: SCSI driver HA context
547 * @mb0: Mailbox0 register
548 */
549 static void
550 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
551 {
552 uint16_t cnt;
553 uint32_t mboxes;
554 __le16 __iomem *wptr;
555 struct qla_hw_data *ha = vha->hw;
556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
557
558 /* Read all mbox registers? */
559 WARN_ON_ONCE(ha->mbx_count > 32);
560 mboxes = (1ULL << ha->mbx_count) - 1;
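	/* Default: assume every implemented mailbox register may carry output. */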
561 if (!ha->mcp)
562 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
563 else
564 mboxes = ha->mcp->in_mb;
565
566 /* Load return mailbox registers. */
567 ha->flags.mbox_int = 1;
568 ha->mailbox_out[0] = mb0;
569 mboxes >>= 1;
570 wptr = MAILBOX_REG(ha, reg, 1);
571
572 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
573 if (IS_QLA2200(ha) && cnt == 8)
574 wptr = MAILBOX_REG(ha, reg, 8);
575 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
576 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
577 else if (mboxes & BIT_0)
578 ha->mailbox_out[cnt] = rd_reg_word(wptr);
579
580 wptr++;
581 mboxes >>= 1;
582 }
583 }
584
585 static void
586 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
587 {
588 static char *event[] =
589 { "Complete", "Request Notification", "Time Extension" };
590 int rval;
591 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
592 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
593 __le16 __iomem *wptr;
594 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
595
596 /* Seed data -- mailbox1 -> mailbox7. */
597 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
598 wptr = &reg24->mailbox1;
599 else if (IS_QLA8044(vha->hw))
600 wptr = &reg82->mailbox_out[1];
601 else
602 return;
603
604 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
605 mb[cnt] = rd_reg_word(wptr);
606
607 ql_dbg(ql_dbg_async, vha, 0x5021,
608 "Inter-Driver Communication %s -- "
609 "%04x %04x %04x %04x %04x %04x %04x.\n",
610 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
611 mb[4], mb[5], mb[6]);
612 switch (aen) {
613 /* Handle IDC Error completion case. */
614 case MBA_IDC_COMPLETE:
615 if (mb[1] >> 15) {
616 vha->hw->flags.idc_compl_status = 1;
617 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
618 complete(&vha->hw->dcbx_comp);
619 }
620 break;
621
622 case MBA_IDC_NOTIFY:
623 /* Acknowledgement needed? [Notify && non-zero timeout]. */
624 timeout = (descr >> 8) & 0xf;
625 ql_dbg(ql_dbg_async, vha, 0x5022,
626 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
627 vha->host_no, event[aen & 0xff], timeout);
628
629 if (!timeout)
630 return;
631 rval = qla2x00_post_idc_ack_work(vha, mb);
632 if (rval != QLA_SUCCESS)
633 ql_log(ql_log_warn, vha, 0x5023,
634 "IDC failed to post ACK.\n");
635 break;
636 case MBA_IDC_TIME_EXT:
637 vha->hw->idc_extend_tmo = descr;
638 ql_dbg(ql_dbg_async, vha, 0x5087,
639 "%lu Inter-Driver Communication %s -- "
640 "Extend timeout by=%d.\n",
641 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
642 break;
643 }
644 }
645
646 #define LS_UNKNOWN 2
647 const char *
648 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
649 {
650 static const char *const link_speeds[] = {
651 "1", "2", "?", "4", "8", "16", "32", "64", "10"
652 };
653 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
654
655 if (IS_QLA2100(ha) || IS_QLA2200(ha))
656 return link_speeds[0];
657 else if (speed == 0x13)
658 return link_speeds[QLA_LAST_SPEED];
659 else if (speed < QLA_LAST_SPEED)
660 return link_speeds[speed];
661 else
662 return link_speeds[LS_UNKNOWN];
663 }
664
665 static void
666 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
667 {
668 struct qla_hw_data *ha = vha->hw;
669
670 /*
671 * 8200 AEN Interpretation:
672 * mb[0] = AEN code
673 * mb[1] = AEN Reason code
674 * mb[2] = LSW of Peg-Halt Status-1 Register
675 * mb[6] = MSW of Peg-Halt Status-1 Register
676 * mb[3] = LSW of Peg-Halt Status-2 register
677 * mb[7] = MSW of Peg-Halt Status-2 register
678 * mb[4] = IDC Device-State Register value
679 * mb[5] = IDC Driver-Presence Register value
680 */
681 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
682 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
683 mb[0], mb[1], mb[2], mb[6]);
684 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
685 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
686 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
687
688 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
689 IDC_HEARTBEAT_FAILURE)) {
690 ha->flags.nic_core_hung = 1;
691 ql_log(ql_log_warn, vha, 0x5060,
692 "83XX: F/W Error Reported: Check if reset required.\n");
693
694 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
695 uint32_t protocol_engine_id, fw_err_code, err_level;
696
697 /*
698 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
699 * - PEG-Halt Status-1 Register:
700 * (LSW = mb[2], MSW = mb[6])
701 * Bits 0-7 = protocol-engine ID
702 * Bits 8-28 = f/w error code
703 * Bits 29-31 = Error-level
704 * Error-level 0x1 = Non-Fatal error
705 * Error-level 0x2 = Recoverable Fatal error
706 * Error-level 0x4 = UnRecoverable Fatal error
707 * - PEG-Halt Status-2 Register:
708 * (LSW = mb[3], MSW = mb[7])
709 */
710 protocol_engine_id = (mb[2] & 0xff);
711 fw_err_code = (((mb[2] & 0xff00) >> 8) |
712 ((mb[6] & 0x1fff) << 8));
713 err_level = ((mb[6] & 0xe000) >> 13);
714 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
715 "Register: protocol_engine_id=0x%x "
716 "fw_err_code=0x%x err_level=0x%x.\n",
717 protocol_engine_id, fw_err_code, err_level);
718 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
719 "Register: 0x%x%x.\n", mb[7], mb[3]);
720 if (err_level == ERR_LEVEL_NON_FATAL) {
721 ql_log(ql_log_warn, vha, 0x5063,
722 "Not a fatal error, f/w has recovered itself.\n");
723 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
724 ql_log(ql_log_fatal, vha, 0x5064,
725 "Recoverable Fatal error: Chip reset "
726 "required.\n");
727 qla83xx_schedule_work(vha,
728 QLA83XX_NIC_CORE_RESET);
729 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
730 ql_log(ql_log_fatal, vha, 0x5065,
731 "Unrecoverable Fatal error: Set FAILED "
732 "state, reboot required.\n");
733 qla83xx_schedule_work(vha,
734 QLA83XX_NIC_CORE_UNRECOVERABLE);
735 }
736 }
737
738 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
739 uint16_t peg_fw_state, nw_interface_link_up;
740 uint16_t nw_interface_signal_detect, sfp_status;
741 uint16_t htbt_counter, htbt_monitor_enable;
742 uint16_t sfp_additional_info, sfp_multirate;
743 uint16_t sfp_tx_fault, link_speed, dcbx_status;
744
745 /*
746 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
747 * - PEG-to-FC Status Register:
748 * (LSW = mb[2], MSW = mb[6])
749 * Bits 0-7 = Peg-Firmware state
750 * Bit 8 = N/W Interface Link-up
751 * Bit 9 = N/W Interface signal detected
752 * Bits 10-11 = SFP Status
753 * SFP Status 0x0 = SFP+ transceiver not expected
754 * SFP Status 0x1 = SFP+ transceiver not present
755 * SFP Status 0x2 = SFP+ transceiver invalid
756 * SFP Status 0x3 = SFP+ transceiver present and
757 * valid
758 * Bits 12-14 = Heartbeat Counter
759 * Bit 15 = Heartbeat Monitor Enable
760 * Bits 16-17 = SFP Additional Info
761 * SFP info 0x0 = Unrecognized transceiver for
762 * Ethernet
763 * SFP info 0x1 = SFP+ brand validation failed
764 * SFP info 0x2 = SFP+ speed validation failed
765 * SFP info 0x3 = SFP+ access error
766 * Bit 18 = SFP Multirate
767 * Bit 19 = SFP Tx Fault
768 * Bits 20-22 = Link Speed
769 * Bits 23-27 = Reserved
770 * Bits 28-30 = DCBX Status
771 * DCBX Status 0x0 = DCBX Disabled
772 * DCBX Status 0x1 = DCBX Enabled
773 * DCBX Status 0x2 = DCBX Exchange error
774 * Bit 31 = Reserved
775 */
776 peg_fw_state = (mb[2] & 0x00ff);
777 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
778 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
779 sfp_status = ((mb[2] & 0x0c00) >> 10);
780 htbt_counter = ((mb[2] & 0x7000) >> 12);
781 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
782 sfp_additional_info = (mb[6] & 0x0003);
783 sfp_multirate = ((mb[6] & 0x0004) >> 2);
784 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
785 link_speed = ((mb[6] & 0x0070) >> 4);
786 dcbx_status = ((mb[6] & 0x7000) >> 12);
787
788 ql_log(ql_log_warn, vha, 0x5066,
789 "Peg-to-Fc Status Register:\n"
790 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
791 "nw_interface_signal_detect=0x%x"
792 "\nsfp_status=0x%x.\n ", peg_fw_state,
793 nw_interface_link_up, nw_interface_signal_detect,
794 sfp_status);
795 ql_log(ql_log_warn, vha, 0x5067,
796 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
797 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
798 htbt_counter, htbt_monitor_enable,
799 sfp_additional_info, sfp_multirate);
800 ql_log(ql_log_warn, vha, 0x5068,
801 "sfp_tx_fault=0x%x, link_speed=0x%x, "
802 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
803 dcbx_status);
804
805 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
806 }
807
808 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
809 ql_log(ql_log_warn, vha, 0x5069,
810 "Heartbeat Failure encountered, chip reset "
811 "required.\n");
812
813 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
814 }
815 }
816
817 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
818 ql_log(ql_log_info, vha, 0x506a,
819 "IDC Device-State changed = 0x%x.\n", mb[4]);
820 if (ha->flags.nic_core_reset_owner)
821 return;
822 qla83xx_schedule_work(vha, MBA_IDC_AEN);
823 }
824 }
825
826 int
827 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
828 {
829 struct qla_hw_data *ha = vha->hw;
830 scsi_qla_host_t *vp;
831 uint32_t vp_did;
832 unsigned long flags;
833 int ret = 0;
834
835 if (!ha->num_vhosts)
836 return ret;
837
838 spin_lock_irqsave(&ha->vport_slock, flags);
839 list_for_each_entry(vp, &ha->vp_list, list) {
840 vp_did = vp->d_id.b24;
841 if (vp_did == rscn_entry) {
842 ret = 1;
843 break;
844 }
845 }
846 spin_unlock_irqrestore(&ha->vport_slock, flags);
847
848 return ret;
849 }
850
851 fc_port_t *
852 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
853 {
854 fc_port_t *f, *tf;
855
856 f = tf = NULL;
857 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
858 if (f->loop_id == loop_id)
859 return f;
860 return NULL;
861 }
862
863 fc_port_t *
864 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
865 {
866 fc_port_t *f, *tf;
867
868 f = tf = NULL;
869 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
870 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
871 if (incl_deleted)
872 return f;
873 else if (f->deleted == 0)
874 return f;
875 }
876 }
877 return NULL;
878 }
879
880 fc_port_t *
881 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
882 u8 incl_deleted)
883 {
884 fc_port_t *f, *tf;
885
886 f = tf = NULL;
887 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
888 if (f->d_id.b24 == id->b24) {
889 if (incl_deleted)
890 return f;
891 else if (f->deleted == 0)
892 return f;
893 }
894 }
895 return NULL;
896 }
897
898 /* Shall be called only on supported adapters. */
899 static void
900 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
901 {
902 struct qla_hw_data *ha = vha->hw;
903 bool reset_isp_needed = false;
904
905 ql_log(ql_log_warn, vha, 0x02f0,
906 "MPI Heartbeat stop. MPI reset is%s needed. "
907 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
908 mb[1] & BIT_8 ? "" : " not",
909 mb[0], mb[1], mb[2], mb[3]);
910
911 if ((mb[1] & BIT_8) == 0)
912 return;
913
914 ql_log(ql_log_warn, vha, 0x02f1,
915 "MPI Heartbeat stop. FW dump needed\n");
916
917 if (ql2xfulldump_on_mpifail) {
918 ha->isp_ops->fw_dump(vha);
919 reset_isp_needed = true;
920 }
921
922 ha->isp_ops->mpi_fw_dump(vha, 1);
923
924 if (reset_isp_needed) {
925 vha->hw->flags.fw_init_done = 0;
926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
927 qla2xxx_wake_dpc(vha);
928 }
929 }
930
931 static struct purex_item *
932 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
933 {
934 struct purex_item *item = NULL;
935 uint8_t item_hdr_size = sizeof(*item);
936
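	/*
	 * Payloads larger than the embedded default buffer get a dedicated
	 * allocation; otherwise reuse the per-host default item when it is
	 * free.
	 */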
937 if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
938 item = kzalloc(item_hdr_size +
939 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
940 } else {
941 if (atomic_inc_return(&vha->default_item.in_use) == 1) {
942 item = &vha->default_item;
943 goto initialize_purex_header;
944 } else {
945 item = kzalloc(item_hdr_size, GFP_ATOMIC);
946 }
947 }
948 if (!item) {
949 ql_log(ql_log_warn, vha, 0x5092,
950 ">> Failed to allocate purex list item.\n");
951
952 return NULL;
953 }
954
955 initialize_purex_header:
956 item->vha = vha;
957 item->size = size;
958 return item;
959 }
960
961 static void
962 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
963 void (*process_item)(struct scsi_qla_host *vha,
964 struct purex_item *pkt))
965 {
966 struct purex_list *list = &vha->purex_list;
967 ulong flags;
968
969 pkt->process_item = process_item;
970
971 spin_lock_irqsave(&list->lock, flags);
972 list_add_tail(&pkt->list, &list->head);
973 spin_unlock_irqrestore(&list->lock, flags);
974
975 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
976 }
977
978 /**
979 * qla24xx_copy_std_pkt() - Copy over a purex ELS request that is
980 * contained in a single IOCB.
982 * @vha: SCSI driver HA context
983 * @pkt: ELS packet
984 */
985 static struct purex_item
986 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
987 {
988 struct purex_item *item;
989
990 item = qla24xx_alloc_purex_item(vha,
991 QLA_DEFAULT_PAYLOAD_SIZE);
992 if (!item)
993 return item;
994
995 memcpy(&item->iocb, pkt, sizeof(item->iocb));
996 return item;
997 }
998
999 /**
1000 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
1001 * span over multiple IOCBs.
1002 * @vha: SCSI driver HA context
1003 * @pkt: ELS packet
1004 * @rsp: Response queue
1005 */
1006 static struct purex_item *
1007 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
1008 struct rsp_que **rsp)
1009 {
1010 struct purex_entry_24xx *purex = *pkt;
1011 struct rsp_que *rsp_q = *rsp;
1012 sts_cont_entry_t *new_pkt;
1013 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
1014 uint16_t buffer_copy_offset = 0;
1015 uint16_t entry_count, entry_count_remaining;
1016 struct purex_item *item;
1017 void *fpin_pkt = NULL;
1018
1019 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
1020 - PURX_ELS_HEADER_SIZE;
1021 pending_bytes = total_bytes;
1022 entry_count = entry_count_remaining = purex->entry_count;
1023 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
1024 sizeof(purex->els_frame_payload) : pending_bytes;
1025 ql_log(ql_log_info, vha, 0x509a,
1026 "FPIN ELS, frame_size 0x%x, entry count %d\n",
1027 total_bytes, entry_count);
1028
1029 item = qla24xx_alloc_purex_item(vha, total_bytes);
1030 if (!item)
1031 return item;
1032
1033 fpin_pkt = &item->iocb;
1034
1035 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
1036 buffer_copy_offset += no_bytes;
1037 pending_bytes -= no_bytes;
1038 --entry_count_remaining;
1039
1040 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
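	/* flush signature */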
1041 wmb();
1042
1043 do {
1044 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
1045 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
1046 ql_dbg(ql_dbg_async, vha, 0x5084,
1047 "Ran out of IOCBs, partial data 0x%x\n",
1048 buffer_copy_offset);
1049 cpu_relax();
1050 continue;
1051 }
1052
1053 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
1054 *pkt = new_pkt;
1055
1056 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
1057 ql_log(ql_log_warn, vha, 0x507a,
1058 "Unexpected IOCB type, partial data 0x%x\n",
1059 buffer_copy_offset);
1060 break;
1061 }
1062
1063 rsp_q->ring_index++;
1064 if (rsp_q->ring_index == rsp_q->length) {
1065 rsp_q->ring_index = 0;
1066 rsp_q->ring_ptr = rsp_q->ring;
1067 } else {
1068 rsp_q->ring_ptr++;
1069 }
1070 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
1071 sizeof(new_pkt->data) : pending_bytes;
1072 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
1073 memcpy(((uint8_t *)fpin_pkt +
1074 buffer_copy_offset), new_pkt->data,
1075 no_bytes);
1076 buffer_copy_offset += no_bytes;
1077 pending_bytes -= no_bytes;
1078 --entry_count_remaining;
1079 } else {
1080 ql_log(ql_log_warn, vha, 0x5044,
1081 "Attempt to copy more than we got, optimizing..%x\n",
1082 buffer_copy_offset);
1083 memcpy(((uint8_t *)fpin_pkt +
1084 buffer_copy_offset), new_pkt->data,
1085 total_bytes - buffer_copy_offset);
1086 }
1087
1088 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
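	/* flush signature */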
1089 wmb();
1090 }
1091
1092 if (pending_bytes != 0 || entry_count_remaining != 0) {
1093 ql_log(ql_log_fatal, vha, 0x508b,
1094 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
1095 total_bytes, entry_count_remaining);
1096 qla24xx_free_purex_item(item);
1097 return NULL;
1098 }
1099 } while (entry_count_remaining > 0);
1100 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
1101 return item;
1102 }
1103
1104 /**
1105 * qla2x00_async_event() - Process asynchronous events.
1106 * @vha: SCSI driver HA context
1107 * @rsp: response queue
1108 * @mb: Mailbox registers (0 - 3)
1109 */
1110 void
1111 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1112 {
1113 uint16_t handle_cnt;
1114 uint16_t cnt, mbx;
1115 uint32_t handles[5];
1116 struct qla_hw_data *ha = vha->hw;
1117 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1118 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1119 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1120 uint32_t rscn_entry, host_pid;
1121 unsigned long flags;
1122 fc_port_t *fcport = NULL;
1123
1124 if (!vha->hw->flags.fw_started)
1125 return;
1126
1127 /* Setup to process RIO completion. */
1128 handle_cnt = 0;
1129 if (IS_CNA_CAPABLE(ha))
1130 goto skip_rio;
1131 switch (mb[0]) {
1132 case MBA_SCSI_COMPLETION:
1133 handles[0] = make_handle(mb[2], mb[1]);
1134 handle_cnt = 1;
1135 break;
1136 case MBA_CMPLT_1_16BIT:
1137 handles[0] = mb[1];
1138 handle_cnt = 1;
1139 mb[0] = MBA_SCSI_COMPLETION;
1140 break;
1141 case MBA_CMPLT_2_16BIT:
1142 handles[0] = mb[1];
1143 handles[1] = mb[2];
1144 handle_cnt = 2;
1145 mb[0] = MBA_SCSI_COMPLETION;
1146 break;
1147 case MBA_CMPLT_3_16BIT:
1148 handles[0] = mb[1];
1149 handles[1] = mb[2];
1150 handles[2] = mb[3];
1151 handle_cnt = 3;
1152 mb[0] = MBA_SCSI_COMPLETION;
1153 break;
1154 case MBA_CMPLT_4_16BIT:
1155 handles[0] = mb[1];
1156 handles[1] = mb[2];
1157 handles[2] = mb[3];
1158 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1159 handle_cnt = 4;
1160 mb[0] = MBA_SCSI_COMPLETION;
1161 break;
1162 case MBA_CMPLT_5_16BIT:
1163 handles[0] = mb[1];
1164 handles[1] = mb[2];
1165 handles[2] = mb[3];
1166 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1167 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1168 handle_cnt = 5;
1169 mb[0] = MBA_SCSI_COMPLETION;
1170 break;
1171 case MBA_CMPLT_2_32BIT:
1172 handles[0] = make_handle(mb[2], mb[1]);
1173 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1174 RD_MAILBOX_REG(ha, reg, 6));
1175 handle_cnt = 2;
1176 mb[0] = MBA_SCSI_COMPLETION;
1177 break;
1178 default:
1179 break;
1180 }
1181 skip_rio:
1182 switch (mb[0]) {
1183 case MBA_SCSI_COMPLETION: /* Fast Post */
1184 if (!vha->flags.online)
1185 break;
1186
1187 for (cnt = 0; cnt < handle_cnt; cnt++)
1188 qla2x00_process_completed_request(vha, rsp->req,
1189 handles[cnt]);
1190 break;
1191
1192 case MBA_RESET: /* Reset */
1193 ql_dbg(ql_dbg_async, vha, 0x5002,
1194 "Asynchronous RESET.\n");
1195
1196 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1197 break;
1198
1199 case MBA_SYSTEM_ERR: /* System Error */
1200 mbx = 0;
1201
1202 vha->hw_err_cnt++;
1203
1204 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1205 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1206 u16 m[4];
1207
1208 m[0] = rd_reg_word(&reg24->mailbox4);
1209 m[1] = rd_reg_word(&reg24->mailbox5);
1210 m[2] = rd_reg_word(&reg24->mailbox6);
1211 mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1212
1213 ql_log(ql_log_warn, vha, 0x5003,
1214 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1215 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1216 } else
1217 ql_log(ql_log_warn, vha, 0x5003,
1218 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
1219 mb[1], mb[2], mb[3]);
1220
1221 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1222 rd_reg_word(&reg24->mailbox7) & BIT_8)
1223 ha->isp_ops->mpi_fw_dump(vha, 1);
1224 ha->isp_ops->fw_dump(vha);
1225 ha->flags.fw_init_done = 0;
1226 QLA_FW_STOPPED(ha);
1227
1228 if (IS_FWI2_CAPABLE(ha)) {
1229 if (mb[1] == 0 && mb[2] == 0) {
1230 ql_log(ql_log_fatal, vha, 0x5004,
1231 "Unrecoverable Hardware Error: adapter "
1232 "marked OFFLINE!\n");
1233 vha->flags.online = 0;
1234 vha->device_flags |= DFLG_DEV_FAILED;
1235 } else {
1236 /* Check to see if MPI timeout occurred */
1237 if ((mbx & MBX_3) && (ha->port_no == 0))
1238 set_bit(MPI_RESET_NEEDED,
1239 &vha->dpc_flags);
1240
1241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1242 }
1243 } else if (mb[1] == 0) {
1244 ql_log(ql_log_fatal, vha, 0x5005,
1245 "Unrecoverable Hardware Error: adapter marked "
1246 "OFFLINE!\n");
1247 vha->flags.online = 0;
1248 vha->device_flags |= DFLG_DEV_FAILED;
1249 } else
1250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1251 break;
1252
1253 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
1254 ql_log(ql_log_warn, vha, 0x5006,
1255 "ISP Request Transfer Error (%x).\n", mb[1]);
1256
1257 vha->hw_err_cnt++;
1258
1259 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1260 break;
1261
1262 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
1263 ql_log(ql_log_warn, vha, 0x5007,
1264 "ISP Response Transfer Error (%x).\n", mb[1]);
1265
1266 vha->hw_err_cnt++;
1267
1268 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1269 break;
1270
1271 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
1272 ql_dbg(ql_dbg_async, vha, 0x5008,
1273 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1274 break;
1275
1276 case MBA_LOOP_INIT_ERR:
1277 ql_log(ql_log_warn, vha, 0x5090,
1278 "LOOP INIT ERROR (%x).\n", mb[1]);
1279 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1280 break;
1281
1282 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
1283 ha->flags.lip_ae = 1;
1284
1285 ql_dbg(ql_dbg_async, vha, 0x5009,
1286 "LIP occurred (%x).\n", mb[1]);
1287
1288 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1289 atomic_set(&vha->loop_state, LOOP_DOWN);
1290 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1291 qla2x00_mark_all_devices_lost(vha);
1292 }
1293
1294 if (vha->vp_idx) {
1295 atomic_set(&vha->vp_state, VP_FAILED);
1296 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1297 }
1298
1299 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1300 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1301
1302 vha->flags.management_server_logged_in = 0;
1303 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1304 break;
1305
1306 case MBA_LOOP_UP: /* Loop Up Event */
1307 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1308 ha->link_data_rate = PORT_SPEED_1GB;
1309 else
1310 ha->link_data_rate = mb[1];
1311
1312 ql_log(ql_log_info, vha, 0x500a,
1313 "LOOP UP detected (%s Gbps).\n",
1314 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1315
1316 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1317 if (mb[2] & BIT_0)
1318 ql_log(ql_log_info, vha, 0x11a0,
1319 "FEC=enabled (link up).\n");
1320 }
1321
1322 vha->flags.management_server_logged_in = 0;
1323 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1324
1325 if (vha->link_down_time < vha->hw->port_down_retry_count) {
1326 vha->short_link_down_cnt++;
1327 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
1328 }
1329
1330 break;
1331
1332 case MBA_LOOP_DOWN: /* Loop Down Event */
1333 SAVE_TOPO(ha);
1334 ha->flags.lip_ae = 0;
1335 ha->current_topology = 0;
1336 vha->link_down_time = 0;
1337
1338 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1339 ? rd_reg_word(&reg24->mailbox4) : 0;
1340 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1341 : mbx;
1342 ql_log(ql_log_info, vha, 0x500b,
1343 "LOOP DOWN detected (%x %x %x %x).\n",
1344 mb[1], mb[2], mb[3], mbx);
1345
1346 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1347 atomic_set(&vha->loop_state, LOOP_DOWN);
1348 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1349 /*
1350 * In case of loop down, restore WWPN from
1351 * NVRAM in case of FA-WWPN capable ISP
1352 * Restore for Physical Port only
1353 */
1354 if (!vha->vp_idx) {
1355 if (ha->flags.fawwpn_enabled &&
1356 (ha->current_topology == ISP_CFG_F)) {
1357 memcpy(vha->port_name, ha->port_name, WWN_SIZE);
1358 fc_host_port_name(vha->host) =
1359 wwn_to_u64(vha->port_name);
1360 ql_dbg(ql_dbg_init + ql_dbg_verbose,
1361 vha, 0x00d8, "LOOP DOWN detected,"
1362 "restore WWPN %016llx\n",
1363 wwn_to_u64(vha->port_name));
1364 }
1365
1366 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1367 }
1368
1369 vha->device_flags |= DFLG_NO_CABLE;
1370 qla2x00_mark_all_devices_lost(vha);
1371 }
1372
1373 if (vha->vp_idx) {
1374 atomic_set(&vha->vp_state, VP_FAILED);
1375 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1376 }
1377
1378 vha->flags.management_server_logged_in = 0;
1379 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1380 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1381 break;
1382
1383 case MBA_LIP_RESET: /* LIP reset occurred */
1384 ql_dbg(ql_dbg_async, vha, 0x500c,
1385 "LIP reset occurred (%x).\n", mb[1]);
1386
1387 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1388 atomic_set(&vha->loop_state, LOOP_DOWN);
1389 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1390 qla2x00_mark_all_devices_lost(vha);
1391 }
1392
1393 if (vha->vp_idx) {
1394 atomic_set(&vha->vp_state, VP_FAILED);
1395 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1396 }
1397
1398 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1399
1400 ha->operating_mode = LOOP;
1401 vha->flags.management_server_logged_in = 0;
1402 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1403 break;
1404
1405 /* case MBA_DCBX_COMPLETE: */
1406 case MBA_POINT_TO_POINT: /* Point-to-Point */
1407 ha->flags.lip_ae = 0;
1408
1409 if (IS_QLA2100(ha))
1410 break;
1411
1412 if (IS_CNA_CAPABLE(ha)) {
1413 ql_dbg(ql_dbg_async, vha, 0x500d,
1414 "DCBX Completed -- %04x %04x %04x.\n",
1415 mb[1], mb[2], mb[3]);
1416 if (ha->notify_dcbx_comp && !vha->vp_idx)
1417 complete(&ha->dcbx_comp);
1418
1419 } else
1420 ql_dbg(ql_dbg_async, vha, 0x500e,
1421 "Asynchronous P2P MODE received.\n");
1422
1423 /*
1424 * Until there's a transition from loop down to loop up, treat
1425 * this as loop down only.
1426 */
1427 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1428 atomic_set(&vha->loop_state, LOOP_DOWN);
1429 if (!atomic_read(&vha->loop_down_timer))
1430 atomic_set(&vha->loop_down_timer,
1431 LOOP_DOWN_TIME);
1432 if (!N2N_TOPO(ha))
1433 qla2x00_mark_all_devices_lost(vha);
1434 }
1435
1436 if (vha->vp_idx) {
1437 atomic_set(&vha->vp_state, VP_FAILED);
1438 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1439 }
1440
1441 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1442 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1443
1444 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1445 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1446
1447 vha->flags.management_server_logged_in = 0;
1448 break;
1449
1450 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
1451 if (IS_QLA2100(ha))
1452 break;
1453
1454 ql_dbg(ql_dbg_async, vha, 0x500f,
1455 "Configuration change detected: value=%x.\n", mb[1]);
1456
1457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1458 atomic_set(&vha->loop_state, LOOP_DOWN);
1459 if (!atomic_read(&vha->loop_down_timer))
1460 atomic_set(&vha->loop_down_timer,
1461 LOOP_DOWN_TIME);
1462 qla2x00_mark_all_devices_lost(vha);
1463 }
1464
1465 if (vha->vp_idx) {
1466 atomic_set(&vha->vp_state, VP_FAILED);
1467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1468 }
1469
1470 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1471 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1472 break;
1473
1474 case MBA_PORT_UPDATE: /* Port database update */
1475 /*
1476 * Handle only global and vn-port update events
1477 *
1478 * Relevant inputs:
1479 * mb[1] = N_Port handle of changed port
1480 * OR 0xffff for global event
1481 * mb[2] = New login state
1482 * 7 = Port logged out
1483 * mb[3] = LSB is vp_idx, 0xff = all vps
1484 *
1485 * Skip processing if:
1486 * Event is global, vp_idx is NOT all vps,
1487 * vp_idx does not match
1488 * Event is not global, vp_idx does not match
1489 */
1490 if (IS_QLA2XXX_MIDTYPE(ha) &&
1491 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1492 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1493 break;
1494
1495 if (mb[2] == 0x7) {
1496 ql_dbg(ql_dbg_async, vha, 0x5010,
1497 "Port %s %04x %04x %04x.\n",
1498 mb[1] == 0xffff ? "unavailable" : "logout",
1499 mb[1], mb[2], mb[3]);
1500
1501 if (mb[1] == 0xffff)
1502 goto global_port_update;
1503
1504 if (mb[1] == NPH_SNS_LID(ha)) {
1505 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1506 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1507 break;
1508 }
1509
1510 /* use handle_cnt for loop id/nport handle */
1511 if (IS_FWI2_CAPABLE(ha))
1512 handle_cnt = NPH_SNS;
1513 else
1514 handle_cnt = SIMPLE_NAME_SERVER;
1515 if (mb[1] == handle_cnt) {
1516 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1517 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1518 break;
1519 }
1520
1521 /* Port logout */
1522 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1523 if (!fcport)
1524 break;
1525 if (atomic_read(&fcport->state) != FCS_ONLINE)
1526 break;
1527 ql_dbg(ql_dbg_async, vha, 0x508a,
1528 "Marking port lost loopid=%04x portid=%06x.\n",
1529 fcport->loop_id, fcport->d_id.b24);
1530 if (qla_ini_mode_enabled(vha)) {
1531 fcport->logout_on_delete = 0;
1532 qlt_schedule_sess_for_deletion(fcport);
1533 }
1534 break;
1535
1536 global_port_update:
1537 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1538 atomic_set(&vha->loop_state, LOOP_DOWN);
1539 atomic_set(&vha->loop_down_timer,
1540 LOOP_DOWN_TIME);
1541 vha->device_flags |= DFLG_NO_CABLE;
1542 qla2x00_mark_all_devices_lost(vha);
1543 }
1544
1545 if (vha->vp_idx) {
1546 atomic_set(&vha->vp_state, VP_FAILED);
1547 fc_vport_set_state(vha->fc_vport,
1548 FC_VPORT_FAILED);
1549 qla2x00_mark_all_devices_lost(vha);
1550 }
1551
1552 vha->flags.management_server_logged_in = 0;
1553 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1554 break;
1555 }
1556
1557 /*
1558 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1559 * event etc. earlier indicating loop is down) then process
1560 * it.  Otherwise ignore it and wait for an RSCN to come in.
1561 */
1562 atomic_set(&vha->loop_down_timer, 0);
1563 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1564 !ha->flags.n2n_ae &&
1565 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1566 ql_dbg(ql_dbg_async, vha, 0x5011,
1567 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1568 mb[1], mb[2], mb[3]);
1569 break;
1570 }
1571
1572 ql_dbg(ql_dbg_async, vha, 0x5012,
1573 "Port database changed %04x %04x %04x.\n",
1574 mb[1], mb[2], mb[3]);
1575
1576 /*
1577 * Mark all devices as missing so we will login again.
1578 */
1579 atomic_set(&vha->loop_state, LOOP_UP);
1580 vha->scan.scan_retry = 0;
1581
1582 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1583 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1584 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1585 break;
1586
1587 case MBA_RSCN_UPDATE: /* State Change Registration */
1588 /* Check if the Vport has issued a SCR */
1589 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1590 break;
1591 /* Only handle SCNs for our Vport index. */
1592 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1593 break;
1594
1595 ql_log(ql_log_warn, vha, 0x5013,
1596 "RSCN database changed -- %04x %04x %04x.\n",
1597 mb[1], mb[2], mb[3]);
1598
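		/* Rebuild the 24-bit affected port ID: domain from mb[1], area/al_pa from mb[2]. */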
1599 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1600 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1601 | vha->d_id.b.al_pa;
1602 if (rscn_entry == host_pid) {
1603 ql_dbg(ql_dbg_async, vha, 0x5014,
1604 "Ignoring RSCN update to local host "
1605 "port ID (%06x).\n", host_pid);
1606 break;
1607 }
1608
1609 /* Ignore reserved bits from RSCN-payload. */
1610 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1611
1612 /* Skip RSCNs for virtual ports on the same physical port */
1613 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1614 break;
1615
1616 atomic_set(&vha->loop_down_timer, 0);
1617 vha->flags.management_server_logged_in = 0;
1618 {
1619 struct event_arg ea;
1620
1621 memset(&ea, 0, sizeof(ea));
1622 ea.id.b24 = rscn_entry;
1623 ea.id.b.rsvd_1 = rscn_entry >> 24;
1624 qla2x00_handle_rscn(vha, &ea);
1625 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1626 }
1627 break;
1628 case MBA_CONGN_NOTI_RECV:
1629 if (!ha->flags.scm_enabled ||
1630 mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1631 break;
1632
1633 if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1634 ql_dbg(ql_dbg_async, vha, 0x509b,
1635 "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1636 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1637 ql_log(ql_log_warn, vha, 0x509b,
1638 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1639 }
1640 break;
1641 /* case MBA_RIO_RESPONSE: */
1642 case MBA_ZIO_RESPONSE:
1643 ql_dbg(ql_dbg_async, vha, 0x5015,
1644 "[R|Z]IO update completion.\n");
1645
1646 if (IS_FWI2_CAPABLE(ha))
1647 qla24xx_process_response_queue(vha, rsp);
1648 else
1649 qla2x00_process_response_queue(rsp);
1650 break;
1651
1652 case MBA_DISCARD_RND_FRAME:
1653 ql_dbg(ql_dbg_async, vha, 0x5016,
1654 "Discard RND Frame -- %04x %04x %04x.\n",
1655 mb[1], mb[2], mb[3]);
1656 vha->interface_err_cnt++;
1657 break;
1658
1659 case MBA_TRACE_NOTIFICATION:
1660 ql_dbg(ql_dbg_async, vha, 0x5017,
1661 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1662 break;
1663
1664 case MBA_ISP84XX_ALERT:
1665 ql_dbg(ql_dbg_async, vha, 0x5018,
1666 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1667 mb[1], mb[2], mb[3]);
1668
1669 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1670 switch (mb[1]) {
1671 case A84_PANIC_RECOVERY:
1672 ql_log(ql_log_info, vha, 0x5019,
1673 "Alert 84XX: panic recovery %04x %04x.\n",
1674 mb[2], mb[3]);
1675 break;
1676 case A84_OP_LOGIN_COMPLETE:
1677 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1678 ql_log(ql_log_info, vha, 0x501a,
1679 "Alert 84XX: firmware version %x.\n",
1680 ha->cs84xx->op_fw_version);
1681 break;
1682 case A84_DIAG_LOGIN_COMPLETE:
1683 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1684 ql_log(ql_log_info, vha, 0x501b,
1685 "Alert 84XX: diagnostic firmware version %x.\n",
1686 ha->cs84xx->diag_fw_version);
1687 break;
1688 case A84_GOLD_LOGIN_COMPLETE:
1689 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1690 ha->cs84xx->fw_update = 1;
1691 ql_log(ql_log_info, vha, 0x501c,
1692 "Alert 84XX: gold firmware version %x.\n",
1693 ha->cs84xx->gold_fw_version);
1694 break;
1695 default:
1696 ql_log(ql_log_warn, vha, 0x501d,
1697 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1698 mb[1], mb[2], mb[3]);
1699 }
1700 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1701 break;
1702 case MBA_DCBX_START:
1703 ql_dbg(ql_dbg_async, vha, 0x501e,
1704 "DCBX Started -- %04x %04x %04x.\n",
1705 mb[1], mb[2], mb[3]);
1706 break;
1707 case MBA_DCBX_PARAM_UPDATE:
1708 ql_dbg(ql_dbg_async, vha, 0x501f,
1709 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1710 mb[1], mb[2], mb[3]);
1711 break;
1712 case MBA_FCF_CONF_ERR:
1713 ql_dbg(ql_dbg_async, vha, 0x5020,
1714 "FCF Configuration Error -- %04x %04x %04x.\n",
1715 mb[1], mb[2], mb[3]);
1716 break;
1717 case MBA_IDC_NOTIFY:
1718 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1719 mb[4] = rd_reg_word(&reg24->mailbox4);
1720 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1721 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1722 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1723 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1724 /*
1725 * Extend loop down timer since port is active.
1726 */
1727 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1728 atomic_set(&vha->loop_down_timer,
1729 LOOP_DOWN_TIME);
1730 qla2xxx_wake_dpc(vha);
1731 }
1732 }
1733 fallthrough;
1734 case MBA_IDC_COMPLETE:
1735 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1736 complete(&ha->lb_portup_comp);
1737 fallthrough;
1738 case MBA_IDC_TIME_EXT:
1739 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1740 IS_QLA8044(ha))
1741 qla81xx_idc_event(vha, mb[0], mb[1]);
1742 break;
1743
1744 case MBA_IDC_AEN:
1745 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1746 vha->hw_err_cnt++;
1747 qla27xx_handle_8200_aen(vha, mb);
1748 } else if (IS_QLA83XX(ha)) {
1749 mb[4] = rd_reg_word(&reg24->mailbox4);
1750 mb[5] = rd_reg_word(&reg24->mailbox5);
1751 mb[6] = rd_reg_word(&reg24->mailbox6);
1752 mb[7] = rd_reg_word(&reg24->mailbox7);
1753 qla83xx_handle_8200_aen(vha, mb);
1754 } else {
1755 ql_dbg(ql_dbg_async, vha, 0x5052,
1756 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1757 mb[0], mb[1], mb[2], mb[3]);
1758 }
1759 break;
1760
1761 case MBA_DPORT_DIAGNOSTICS:
1762 ql_dbg(ql_dbg_async, vha, 0x5052,
1763 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1764 mb[0], mb[1], mb[2], mb[3]);
1765 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1766 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1767 static char *results[] = {
1768 "start", "done(pass)", "done(error)", "undefined" };
1769 static char *types[] = {
1770 "none", "dynamic", "static", "other" };
1771 uint result = mb[1] >> 0 & 0x3;
1772 uint type = mb[1] >> 6 & 0x3;
1773 uint sw = mb[1] >> 15 & 0x1;
1774 ql_dbg(ql_dbg_async, vha, 0x5052,
1775 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1776 results[result], types[type], sw);
1777 if (result == 2) {
1778 static char *reasons[] = {
1779 "reserved", "unexpected reject",
1780 "unexpected phase", "retry exceeded",
1781 "timed out", "not supported",
1782 "user stopped" };
1783 uint reason = mb[2] >> 0 & 0xf;
1784 uint phase = mb[2] >> 12 & 0xf;
1785 ql_dbg(ql_dbg_async, vha, 0x5052,
1786 "D-Port Diagnostics: reason=%s phase=%u \n",
1787 reason < 7 ? reasons[reason] : "other",
1788 phase >> 1);
1789 }
1790 }
1791 break;
1792
1793 case MBA_TEMPERATURE_ALERT:
1794 ql_dbg(ql_dbg_async, vha, 0x505e,
1795 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1796 break;
1797
1798 case MBA_TRANS_INSERT:
1799 ql_dbg(ql_dbg_async, vha, 0x5091,
1800 "Transceiver Insertion: %04x\n", mb[1]);
1801 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1802 break;
1803
1804 case MBA_TRANS_REMOVE:
1805 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1806 break;
1807
1808 default:
1809 ql_dbg(ql_dbg_async, vha, 0x5057,
1810 "Unknown AEN:%04x %04x %04x %04x\n",
1811 mb[0], mb[1], mb[2], mb[3]);
1812 }
1813
1814 qlt_async_event(mb[0], vha, mb);
1815
1816 if (!vha->vp_idx && ha->num_vhosts)
1817 qla2x00_alert_all_vps(rsp, mb);
1818 }
1819
1820 /**
1821 * qla2x00_process_completed_request() - Process a Fast Post response.
1822 * @vha: SCSI driver HA context
1823 * @req: request queue
1824 * @index: SRB index
1825 */
1826 void
1827 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1828 struct req_que *req, uint32_t index)
1829 {
1830 srb_t *sp;
1831 struct qla_hw_data *ha = vha->hw;
1832
1833 /* Validate handle. */
1834 if (index >= req->num_outstanding_cmds) {
1835 ql_log(ql_log_warn, vha, 0x3014,
1836 "Invalid SCSI command index (%x).\n", index);
1837
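		/*
		 * An out-of-range handle suggests the driver and firmware are
		 * out of sync; schedule the appropriate recovery (FCoE context
		 * reset on P3P parts, full ISP abort otherwise).
		 */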
1838 if (IS_P3P_TYPE(ha))
1839 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1840 else
1841 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1842 return;
1843 }
1844
1845 sp = req->outstanding_cmds[index];
1846 if (sp) {
1847 /* Free outstanding command slot. */
1848 req->outstanding_cmds[index] = NULL;
1849
1850 /* Save ISP completion status */
1851 sp->done(sp, DID_OK << 16);
1852 } else {
1853 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1854
1855 if (IS_P3P_TYPE(ha))
1856 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1857 else
1858 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1859 }
1860 }
1861
1862 srb_t *
1863 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1864 struct req_que *req, void *iocb)
1865 {
1866 struct qla_hw_data *ha = vha->hw;
1867 sts_entry_t *pkt = iocb;
1868 srb_t *sp;
1869 uint16_t index;
1870
1871 if (pkt->handle == QLA_SKIP_HANDLE)
1872 return NULL;
1873
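	/* The low word of the completion handle indexes the outstanding command array. */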
1874 index = LSW(pkt->handle);
1875 if (index >= req->num_outstanding_cmds) {
1876 ql_log(ql_log_warn, vha, 0x5031,
1877 "%s: Invalid command index (%x) type %8ph.\n",
1878 func, index, iocb);
1879 if (IS_P3P_TYPE(ha))
1880 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1881 else
1882 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1883 return NULL;
1884 }
1885 sp = req->outstanding_cmds[index];
1886 if (!sp) {
1887 ql_log(ql_log_warn, vha, 0x5032,
1888 "%s: Invalid completion handle (%x) -- timed-out.\n",
1889 func, index);
1890 return NULL;
1891 }
1892 if (sp->handle != index) {
1893 ql_log(ql_log_warn, vha, 0x5033,
1894 "%s: SRB handle (%x) mismatch %x.\n", func,
1895 sp->handle, index);
1896 return NULL;
1897 }
1898
1899 req->outstanding_cmds[index] = NULL;
1900 return sp;
1901 }
1902
1903 static void
1904 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1905 struct mbx_entry *mbx)
1906 {
1907 const char func[] = "MBX-IOCB";
1908 const char *type;
1909 fc_port_t *fcport;
1910 srb_t *sp;
1911 struct srb_iocb *lio;
1912 uint16_t *data;
1913 uint16_t status;
1914
1915 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1916 if (!sp)
1917 return;
1918
1919 lio = &sp->u.iocb_cmd;
1920 type = sp->name;
1921 fcport = sp->fcport;
1922 data = lio->u.logio.data;
1923
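	/*
	 * Assume failure until the IOCB proves otherwise; data[1] records
	 * whether this login had already been retried.
	 */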
1924 data[0] = MBS_COMMAND_ERROR;
1925 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1926 QLA_LOGIO_LOGIN_RETRIED : 0;
1927 if (mbx->entry_status) {
1928 ql_dbg(ql_dbg_async, vha, 0x5043,
1929 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1930 "entry-status=%x status=%x state-flag=%x "
1931 "status-flags=%x.\n", type, sp->handle,
1932 fcport->d_id.b.domain, fcport->d_id.b.area,
1933 fcport->d_id.b.al_pa, mbx->entry_status,
1934 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1935 le16_to_cpu(mbx->status_flags));
1936
1937 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1938 mbx, sizeof(*mbx));
1939
1940 goto logio_done;
1941 }
1942
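	/*
	 * Note: an IOCB status of 0x30 on a login is still treated as
	 * success when mb0 reports MBS_COMMAND_COMPLETE.
	 */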
1943 status = le16_to_cpu(mbx->status);
1944 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1945 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1946 status = 0;
1947 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1948 ql_dbg(ql_dbg_async, vha, 0x5045,
1949 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1950 type, sp->handle, fcport->d_id.b.domain,
1951 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1952 le16_to_cpu(mbx->mb1));
1953
1954 data[0] = MBS_COMMAND_COMPLETE;
1955 if (sp->type == SRB_LOGIN_CMD) {
1956 fcport->port_type = FCT_TARGET;
1957 if (le16_to_cpu(mbx->mb1) & BIT_0)
1958 fcport->port_type = FCT_INITIATOR;
1959 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1960 fcport->flags |= FCF_FCP2_DEVICE;
1961 }
1962 goto logio_done;
1963 }
1964
1965 data[0] = le16_to_cpu(mbx->mb0);
1966 switch (data[0]) {
1967 case MBS_PORT_ID_USED:
1968 data[1] = le16_to_cpu(mbx->mb1);
1969 break;
1970 case MBS_LOOP_ID_USED:
1971 break;
1972 default:
1973 data[0] = MBS_COMMAND_ERROR;
1974 break;
1975 }
1976
1977 ql_log(ql_log_warn, vha, 0x5046,
1978 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1979 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1980 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1981 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1982 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1983 le16_to_cpu(mbx->mb7));
1984
1985 logio_done:
1986 sp->done(sp, 0);
1987 }
1988
1989 static void
1990 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1991 struct mbx_24xx_entry *pkt)
1992 {
1993 const char func[] = "MBX-IOCB2";
1994 struct qla_hw_data *ha = vha->hw;
1995 srb_t *sp;
1996 struct srb_iocb *si;
1997 u16 sz, i;
1998 int res;
1999
2000 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2001 if (!sp)
2002 return;
2003
2004 if (sp->type == SRB_SCSI_CMD ||
2005 sp->type == SRB_NVME_CMD ||
2006 sp->type == SRB_TM_CMD) {
2007 ql_log(ql_log_warn, vha, 0x509d,
2008 "Inconsistent event entry type %d\n", sp->type);
2009 if (IS_P3P_TYPE(ha))
2010 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2011 else
2012 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2013 return;
2014 }
2015
2016 si = &sp->u.iocb_cmd;
2017 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
2018
2019 for (i = 0; i < sz; i++)
2020 si->u.mbx.in_mb[i] = pkt->mb[i];
2021
2022 res = (si->u.mbx.in_mb[0] & MBS_MASK);
2023
2024 sp->done(sp, res);
2025 }
2026
2027 static void
2028 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2029 struct nack_to_isp *pkt)
2030 {
2031 const char func[] = "nack";
2032 srb_t *sp;
2033 int res = 0;
2034
2035 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2036 if (!sp)
2037 return;
2038
2039 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
2040 res = QLA_FUNCTION_FAILED;
2041
2042 sp->done(sp, res);
2043 }
2044
2045 static void
2046 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
2047 sts_entry_t *pkt, int iocb_type)
2048 {
2049 const char func[] = "CT_IOCB";
2050 const char *type;
2051 srb_t *sp;
2052 struct bsg_job *bsg_job;
2053 struct fc_bsg_reply *bsg_reply;
2054 uint16_t comp_status;
2055 int res = 0;
2056
2057 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2058 if (!sp)
2059 return;
2060
2061 switch (sp->type) {
2062 case SRB_CT_CMD:
2063 bsg_job = sp->u.bsg_job;
2064 bsg_reply = bsg_job->reply;
2065
2066 type = "ct pass-through";
2067
2068 comp_status = le16_to_cpu(pkt->comp_status);
2069
2070 /*
2071 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2072 * fc payload to the caller
2073 */
2074 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2075 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2076
2077 if (comp_status != CS_COMPLETE) {
2078 if (comp_status == CS_DATA_UNDERRUN) {
2079 res = DID_OK << 16;
2080 bsg_reply->reply_payload_rcv_len =
2081 le16_to_cpu(pkt->rsp_info_len);
2082
2083 ql_log(ql_log_warn, vha, 0x5048,
2084 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
2085 type, comp_status,
2086 bsg_reply->reply_payload_rcv_len);
2087 } else {
2088 ql_log(ql_log_warn, vha, 0x5049,
2089 "CT pass-through-%s error comp_status=0x%x.\n",
2090 type, comp_status);
2091 res = DID_ERROR << 16;
2092 bsg_reply->reply_payload_rcv_len = 0;
2093 }
2094 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
2095 pkt, sizeof(*pkt));
2096 } else {
2097 res = DID_OK << 16;
2098 bsg_reply->reply_payload_rcv_len =
2099 bsg_job->reply_payload.payload_len;
2100 bsg_job->reply_len = 0;
2101 }
2102 break;
2103 case SRB_CT_PTHRU_CMD:
2104 /*
2105 * borrowing sts_entry_24xx.comp_status.
2106 * same location as ct_entry_24xx.comp_status
2107 */
2108 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
2109 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2110 sp->name);
2111 break;
2112 }
2113
2114 sp->done(sp, res);
2115 }
2116
2117 static void
2118 qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
2119 struct sts_entry_24xx *pkt, int iocb_type)
2120 {
2121 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
2122 const char func[] = "ELS_CT_IOCB";
2123 const char *type;
2124 srb_t *sp;
2125 struct bsg_job *bsg_job;
2126 struct fc_bsg_reply *bsg_reply;
2127 uint16_t comp_status;
2128 uint32_t fw_status[3];
2129 int res, logit = 1;
2130 struct srb_iocb *els;
2131 uint n;
2132 scsi_qla_host_t *vha;
2133 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
2134
2135 sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2136 if (!sp)
2137 return;
2138 bsg_job = sp->u.bsg_job;
2139 vha = sp->vha;
2140
2141 type = NULL;
2142
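	/*
	 * fw_status[0..2] capture the completion status and the two ELS
	 * error subcodes; on error they are appended to the bsg reply below.
	 */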
2143 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2144 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
2145 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
2146
2147 switch (sp->type) {
2148 case SRB_ELS_CMD_RPT:
2149 case SRB_ELS_CMD_HST:
2150 type = "rpt hst";
2151 break;
2152 case SRB_ELS_CMD_HST_NOLOGIN:
2153 type = "els";
2154 {
2155 struct els_entry_24xx *els = (void *)pkt;
2156 struct qla_bsg_auth_els_request *p =
2157 (struct qla_bsg_auth_els_request *)bsg_job->request;
2158
2159 ql_dbg(ql_dbg_user, vha, 0x700f,
2160 "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
2161 __func__, sc_to_str(p->e.sub_cmd),
2162 e->d_id[2], e->d_id[1], e->d_id[0],
2163 comp_status, p->e.extra_rx_xchg_address, bsg_job);
2164
2165 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
2166 if (sp->remap.remapped) {
2167 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2168 bsg_job->reply_payload.sg_cnt,
2169 sp->remap.rsp.buf,
2170 sp->remap.rsp.len);
2171 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
2172 "%s: SG copied %x of %x\n",
2173 __func__, n, sp->remap.rsp.len);
2174 } else {
2175 ql_dbg(ql_dbg_user, vha, 0x700f,
2176 "%s: NOT REMAPPED (error)...!!!\n",
2177 __func__);
2178 }
2179 }
2180 }
2181 break;
2182 case SRB_CT_CMD:
2183 type = "ct pass-through";
2184 break;
2185 case SRB_ELS_DCMD:
2186 type = "Driver ELS logo";
2187 if (iocb_type != ELS_IOCB_TYPE) {
2188 ql_dbg(ql_dbg_user, vha, 0x5047,
2189 "Completing %s: (%p) type=%d.\n",
2190 type, sp, sp->type);
2191 sp->done(sp, 0);
2192 return;
2193 }
2194 break;
2195 case SRB_CT_PTHRU_CMD:
2196 		/* Borrowing sts_entry_24xx.comp_status:
2197 		 * same location as ct_entry_24xx.comp_status.
2198 		 */
2199 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2200 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2201 sp->name);
2202 sp->done(sp, res);
2203 return;
2204 default:
2205 ql_dbg(ql_dbg_user, vha, 0x503e,
2206 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2207 return;
2208 }
2209
2210 if (iocb_type == ELS_IOCB_TYPE) {
2211 els = &sp->u.iocb_cmd;
2212 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2213 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2214 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2215 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2216 if (comp_status == CS_COMPLETE) {
2217 res = DID_OK << 16;
2218 } else {
2219 if (comp_status == CS_DATA_UNDERRUN) {
2220 res = DID_OK << 16;
2221 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2222 ese->total_byte_count));
2223
2224 if (sp->remap.remapped &&
2225 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
2226 ql_dbg(ql_dbg_user, vha, 0x503f,
2227 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
2228 __func__, e->s_id[0], e->s_id[2], e->s_id[1],
2229 e->d_id[2], e->d_id[1], e->d_id[0]);
2230 logit = 0;
2231 }
2232
2233 } else if (comp_status == CS_PORT_LOGGED_OUT) {
2234 ql_dbg(ql_dbg_disc, vha, 0x911e,
2235 "%s %d schedule session deletion\n",
2236 __func__, __LINE__);
2237
2238 els->u.els_plogi.len = 0;
2239 res = DID_IMM_RETRY << 16;
2240 qlt_schedule_sess_for_deletion(sp->fcport);
2241 } else {
2242 els->u.els_plogi.len = 0;
2243 res = DID_ERROR << 16;
2244 }
2245
2246 if (logit) {
2247 if (sp->remap.remapped &&
2248 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
2249 ql_dbg(ql_dbg_user, vha, 0x503f,
2250 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
2251 type, sp->handle, comp_status);
2252
2253 ql_dbg(ql_dbg_user, vha, 0x503f,
2254 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2255 fw_status[1], fw_status[2],
2256 le32_to_cpu(((struct els_sts_entry_24xx *)
2257 pkt)->total_byte_count),
2258 e->s_id[0], e->s_id[2], e->s_id[1],
2259 e->d_id[2], e->d_id[1], e->d_id[0]);
2260 } else {
2261 ql_log(ql_log_info, vha, 0x503f,
2262 "%s IOCB Done hdl=%x comp_status=0x%x\n",
2263 type, sp->handle, comp_status);
2264 ql_log(ql_log_info, vha, 0x503f,
2265 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2266 fw_status[1], fw_status[2],
2267 le32_to_cpu(((struct els_sts_entry_24xx *)
2268 pkt)->total_byte_count),
2269 e->s_id[0], e->s_id[2], e->s_id[1],
2270 e->d_id[2], e->d_id[1], e->d_id[0]);
2271 }
2272 }
2273 }
2274 goto els_ct_done;
2275 }
2276
2277 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2278 * fc payload to the caller
2279 */
2280 bsg_job = sp->u.bsg_job;
2281 bsg_reply = bsg_job->reply;
2282 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2283 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2284
2285 if (comp_status != CS_COMPLETE) {
2286 if (comp_status == CS_DATA_UNDERRUN) {
2287 res = DID_OK << 16;
2288 bsg_reply->reply_payload_rcv_len =
2289 le32_to_cpu(ese->total_byte_count);
2290
2291 ql_dbg(ql_dbg_user, vha, 0x503f,
2292 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2293 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2294 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2295 le32_to_cpu(ese->total_byte_count));
2296 } else {
2297 ql_dbg(ql_dbg_user, vha, 0x5040,
2298 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2299 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2300 type, sp->handle, comp_status,
2301 le32_to_cpu(ese->error_subcode_1),
2302 le32_to_cpu(ese->error_subcode_2));
2303 res = DID_ERROR << 16;
2304 bsg_reply->reply_payload_rcv_len = 0;
2305 }
2306 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2307 fw_status, sizeof(fw_status));
2308 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2309 pkt, sizeof(*pkt));
2310 	} else {
2312 res = DID_OK << 16;
2313 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2314 bsg_job->reply_len = 0;
2315 }
2316 els_ct_done:
2317
2318 sp->done(sp, res);
2319 }
2320
2321 static void
2322 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2323 struct logio_entry_24xx *logio)
2324 {
2325 const char func[] = "LOGIO-IOCB";
2326 const char *type;
2327 fc_port_t *fcport;
2328 srb_t *sp;
2329 struct srb_iocb *lio;
2330 uint16_t *data;
2331 uint32_t iop[2];
2332 int logit = 1;
2333
2334 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2335 if (!sp)
2336 return;
2337
2338 lio = &sp->u.iocb_cmd;
2339 type = sp->name;
2340 fcport = sp->fcport;
2341 data = lio->u.logio.data;
2342
2343 data[0] = MBS_COMMAND_ERROR;
2344 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2345 QLA_LOGIO_LOGIN_RETRIED : 0;
2346 if (logio->entry_status) {
2347 ql_log(ql_log_warn, fcport->vha, 0x5034,
2348 "Async-%s error entry - %8phC hdl=%x"
2349 "portid=%02x%02x%02x entry-status=%x.\n",
2350 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2351 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2352 logio->entry_status);
2353 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2354 logio, sizeof(*logio));
2355
2356 goto logio_done;
2357 }
2358
2359 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2360 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2361 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2362 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2363 le32_to_cpu(logio->io_parameter[0]));
2364
2365 vha->hw->exch_starvation = 0;
2366 data[0] = MBS_COMMAND_COMPLETE;
2367
2368 if (sp->type == SRB_PRLI_CMD) {
2369 lio->u.logio.iop[0] =
2370 le32_to_cpu(logio->io_parameter[0]);
2371 lio->u.logio.iop[1] =
2372 le32_to_cpu(logio->io_parameter[1]);
2373 goto logio_done;
2374 }
2375
2376 if (sp->type != SRB_LOGIN_CMD)
2377 goto logio_done;
2378
2379 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2380 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2381 fcport->flags |= FCF_FCSP_DEVICE;
2382
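		/*
		 * Decode the PLOGI ACC bits in io_parameter[0] as used here:
		 * BIT_4 = target function (BIT_8 additionally marks an FCP-2
		 * device), BIT_5 = initiator function, BIT_7 = confirmed
		 * completion supported.
		 */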
2383 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2384 if (iop[0] & BIT_4) {
2385 fcport->port_type = FCT_TARGET;
2386 if (iop[0] & BIT_8)
2387 fcport->flags |= FCF_FCP2_DEVICE;
2388 } else if (iop[0] & BIT_5)
2389 fcport->port_type = FCT_INITIATOR;
2390
2391 if (iop[0] & BIT_7)
2392 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2393
2394 if (logio->io_parameter[7] || logio->io_parameter[8])
2395 fcport->supported_classes |= FC_COS_CLASS2;
2396 if (logio->io_parameter[9] || logio->io_parameter[10])
2397 fcport->supported_classes |= FC_COS_CLASS3;
2398
2399 goto logio_done;
2400 }
2401
2402 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2403 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2404 lio->u.logio.iop[0] = iop[0];
2405 lio->u.logio.iop[1] = iop[1];
2406 switch (iop[0]) {
2407 case LSC_SCODE_PORTID_USED:
2408 data[0] = MBS_PORT_ID_USED;
2409 data[1] = LSW(iop[1]);
2410 logit = 0;
2411 break;
2412 case LSC_SCODE_NPORT_USED:
2413 data[0] = MBS_LOOP_ID_USED;
2414 logit = 0;
2415 break;
2416 case LSC_SCODE_CMD_FAILED:
2417 if (iop[1] == 0x0606) {
2418 			/*
2419 			 * PLOGI/PRLI completed: we must have received a
2420 			 * PLOGI/PRLI that the target side acknowledged.
2421 			 */
2422 data[0] = MBS_COMMAND_COMPLETE;
2423 goto logio_done;
2424 }
2425 data[0] = MBS_COMMAND_ERROR;
2426 break;
2427 case LSC_SCODE_NOXCB:
2428 vha->hw->exch_starvation++;
2429 if (vha->hw->exch_starvation > 5) {
2430 ql_log(ql_log_warn, vha, 0xd046,
2431 "Exchange starvation. Resetting RISC\n");
2432
2433 vha->hw->exch_starvation = 0;
2434
2435 if (IS_P3P_TYPE(vha->hw))
2436 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2437 else
2438 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2439 qla2xxx_wake_dpc(vha);
2440 }
2441 fallthrough;
2442 default:
2443 data[0] = MBS_COMMAND_ERROR;
2444 break;
2445 }
2446
2447 if (logit)
2448 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2449 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2450 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2451 le16_to_cpu(logio->comp_status),
2452 le32_to_cpu(logio->io_parameter[0]),
2453 le32_to_cpu(logio->io_parameter[1]));
2454 else
2455 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2456 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2457 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2458 le16_to_cpu(logio->comp_status),
2459 le32_to_cpu(logio->io_parameter[0]),
2460 le32_to_cpu(logio->io_parameter[1]));
2461
2462 logio_done:
2463 sp->done(sp, 0);
2464 }
2465
2466 static void
2467 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2468 {
2469 const char func[] = "TMF-IOCB";
2470 const char *type;
2471 fc_port_t *fcport;
2472 srb_t *sp;
2473 struct srb_iocb *iocb;
2474 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2475 u16 comp_status;
2476
2477 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2478 if (!sp)
2479 return;
2480
2481 comp_status = le16_to_cpu(sts->comp_status);
2482 iocb = &sp->u.iocb_cmd;
2483 type = sp->name;
2484 fcport = sp->fcport;
2485 iocb->u.tmf.data = QLA_SUCCESS;
2486
2487 if (sts->entry_status) {
2488 ql_log(ql_log_warn, fcport->vha, 0x5038,
2489 "Async-%s error - hdl=%x entry-status(%x).\n",
2490 type, sp->handle, sts->entry_status);
2491 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2492 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2493 ql_log(ql_log_warn, fcport->vha, 0x5039,
2494 "Async-%s error - hdl=%x completion status(%x).\n",
2495 type, sp->handle, comp_status);
2496 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2497 } else if ((le16_to_cpu(sts->scsi_status) &
2498 SS_RESPONSE_INFO_LEN_VALID)) {
2499 host_to_fcp_swap(sts->data, sizeof(sts->data));
2500 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2501 ql_log(ql_log_warn, fcport->vha, 0x503b,
2502 "Async-%s error - hdl=%x not enough response(%d).\n",
2503 type, sp->handle, sts->rsp_data_len);
2504 } else if (sts->data[3]) {
2505 ql_log(ql_log_warn, fcport->vha, 0x503c,
2506 "Async-%s error - hdl=%x response(%x).\n",
2507 type, sp->handle, sts->data[3]);
2508 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2509 }
2510 }
2511
2512 switch (comp_status) {
2513 case CS_PORT_LOGGED_OUT:
2514 case CS_PORT_CONFIG_CHG:
2515 case CS_PORT_BUSY:
2516 case CS_INCOMPLETE:
2517 case CS_PORT_UNAVAILABLE:
2518 case CS_TIMEOUT:
2519 case CS_RESET:
2520 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2521 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2522 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2523 fcport->d_id.b.domain, fcport->d_id.b.area,
2524 fcport->d_id.b.al_pa,
2525 port_state_str[FCS_ONLINE],
2526 comp_status);
2527
2528 qlt_schedule_sess_for_deletion(fcport);
2529 }
2530 break;
2531
2532 default:
2533 break;
2534 }
2535
2536 if (iocb->u.tmf.data != QLA_SUCCESS)
2537 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2538 sts, sizeof(*sts));
2539
2540 sp->done(sp, 0);
2541 }
2542
2543 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2544 void *tsk, srb_t *sp)
2545 {
2546 fc_port_t *fcport;
2547 struct srb_iocb *iocb;
2548 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2549 uint16_t state_flags;
2550 struct nvmefc_fcp_req *fd;
2551 uint16_t ret = QLA_SUCCESS;
2552 __le16 comp_status = sts->comp_status;
2553 int logit = 0;
2554
2555 iocb = &sp->u.iocb_cmd;
2556 fcport = sp->fcport;
2557 iocb->u.nvme.comp_status = comp_status;
2558 state_flags = le16_to_cpu(sts->state_flags);
2559 fd = iocb->u.nvme.desc;
2560
2561 if (unlikely(iocb->u.nvme.aen_op))
2562 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2563 else
2564 sp->qpair->cmd_completion_cnt++;
2565
2566 if (unlikely(comp_status != CS_COMPLETE))
2567 logit = 1;
2568
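	/* Bytes actually transferred = requested payload length minus the firmware-reported residual. */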
2569 fd->transferred_length = fd->payload_length -
2570 le32_to_cpu(sts->residual_len);
2571
2572 	/*
2573 	 * State flags: bits 6 and 0.
2574 	 * If bit 0 is set, bit 6 does not matter: in both cases the
2575 	 * response was DMA'd to the host buffer.
2576 	 * If both bits are clear, that is the good-path case.
2577 	 * If bit 6 is set and bit 0 is clear, the response data must be
2578 	 * copied from the status IOCB to the response buffer.
2579 	 */
2580 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2581 iocb->u.nvme.rsp_pyld_len = 0;
2582 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2583 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2584 /* Response already DMA'd to fd->rspaddr. */
2585 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2586 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2587 /*
2588 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2589 * as an error.
2590 */
2591 iocb->u.nvme.rsp_pyld_len = 0;
2592 fd->transferred_length = 0;
2593 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2594 "Unexpected values in NVMe_RSP IU.\n");
2595 logit = 1;
2596 } else if (state_flags & SF_NVME_ERSP) {
2597 uint32_t *inbuf, *outbuf;
2598 uint16_t iter;
2599
2600 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2601 outbuf = (uint32_t *)fd->rspaddr;
2602 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2603 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2604 sizeof(struct nvme_fc_ersp_iu))) {
2605 if (ql_mask_match(ql_dbg_io)) {
2606 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2607 iocb->u.nvme.rsp_pyld_len);
2608 ql_log(ql_log_warn, fcport->vha, 0x5100,
2609 "Unexpected response payload length %u.\n",
2610 iocb->u.nvme.rsp_pyld_len);
2611 }
2612 iocb->u.nvme.rsp_pyld_len =
2613 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2614 }
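		/* Copy the ERSP payload out of the status IOCB, byte-swapping each dword. */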
2615 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2616 for (; iter; iter--)
2617 *outbuf++ = swab32(*inbuf++);
2618 }
2619
2620 if (state_flags & SF_NVME_ERSP) {
2621 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2622 u32 tgt_xfer_len;
2623
2624 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2625 if (fd->transferred_length != tgt_xfer_len) {
2626 ql_log(ql_log_warn, fcport->vha, 0x3079,
2627 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2628 tgt_xfer_len, fd->transferred_length);
2629 logit = 1;
2630 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2631 /*
2632 * Do not log if this is just an underflow and there
2633 * is no data loss.
2634 */
2635 logit = 0;
2636 }
2637 }
2638
2639 if (unlikely(logit))
2640 ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
2641 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2642 sp->name, sp->handle, comp_status,
2643 fd->transferred_length, le32_to_cpu(sts->residual_len),
2644 sts->ox_id);
2645
2646 /*
2647 * If transport error then Failure (HBA rejects request)
2648 * otherwise transport will handle.
2649 */
2650 switch (le16_to_cpu(comp_status)) {
2651 case CS_COMPLETE:
2652 break;
2653
2654 case CS_RESET:
2655 case CS_PORT_UNAVAILABLE:
2656 case CS_PORT_LOGGED_OUT:
2657 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2658 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2659 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2660 "Port to be marked lost on fcport=%06x, current "
2661 "port state= %s comp_status %x.\n",
2662 fcport->d_id.b24, port_state_str[FCS_ONLINE],
2663 comp_status);
2664
2665 qlt_schedule_sess_for_deletion(fcport);
2666 }
2667 fallthrough;
2668 case CS_ABORTED:
2669 case CS_PORT_BUSY:
2670 fd->transferred_length = 0;
2671 iocb->u.nvme.rsp_pyld_len = 0;
2672 ret = QLA_ABORTED;
2673 break;
2674 case CS_DATA_UNDERRUN:
2675 break;
2676 default:
2677 ret = QLA_FUNCTION_FAILED;
2678 break;
2679 }
2680 sp->done(sp, ret);
2681 }
2682
2683 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2684 struct vp_ctrl_entry_24xx *vce)
2685 {
2686 const char func[] = "CTRLVP-IOCB";
2687 srb_t *sp;
2688 int rval = QLA_SUCCESS;
2689
2690 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2691 if (!sp)
2692 return;
2693
2694 if (vce->entry_status != 0) {
2695 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2696 "%s: Failed to complete IOCB -- error status (%x)\n",
2697 sp->name, vce->entry_status);
2698 rval = QLA_FUNCTION_FAILED;
2699 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2700 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2701 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2702 sp->name, le16_to_cpu(vce->comp_status),
2703 le16_to_cpu(vce->vp_idx_failed));
2704 rval = QLA_FUNCTION_FAILED;
2705 } else {
2706 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2707 "Done %s.\n", __func__);
2708 }
2709
2710 sp->rc = rval;
2711 sp->done(sp, rval);
2712 }
2713
2714 /* Process a single response queue entry. */
2715 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2716 struct rsp_que *rsp,
2717 sts_entry_t *pkt)
2718 {
2719 sts21_entry_t *sts21_entry;
2720 sts22_entry_t *sts22_entry;
2721 uint16_t handle_cnt;
2722 uint16_t cnt;
2723
2724 switch (pkt->entry_type) {
2725 case STATUS_TYPE:
2726 qla2x00_status_entry(vha, rsp, pkt);
2727 break;
2728 case STATUS_TYPE_21:
2729 sts21_entry = (sts21_entry_t *)pkt;
2730 handle_cnt = sts21_entry->handle_count;
2731 for (cnt = 0; cnt < handle_cnt; cnt++)
2732 qla2x00_process_completed_request(vha, rsp->req,
2733 sts21_entry->handle[cnt]);
2734 break;
2735 case STATUS_TYPE_22:
2736 sts22_entry = (sts22_entry_t *)pkt;
2737 handle_cnt = sts22_entry->handle_count;
2738 for (cnt = 0; cnt < handle_cnt; cnt++)
2739 qla2x00_process_completed_request(vha, rsp->req,
2740 sts22_entry->handle[cnt]);
2741 break;
2742 case STATUS_CONT_TYPE:
2743 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2744 break;
2745 case MBX_IOCB_TYPE:
2746 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2747 break;
2748 case CT_IOCB_TYPE:
2749 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2750 break;
2751 default:
2752 /* Type Not Supported. */
2753 ql_log(ql_log_warn, vha, 0x504a,
2754 "Received unknown response pkt type %x entry status=%x.\n",
2755 pkt->entry_type, pkt->entry_status);
2756 break;
2757 }
2758 }
2759
2760 /**
2761 * qla2x00_process_response_queue() - Process response queue entries.
2762 * @rsp: response queue
2763 */
2764 void
2765 qla2x00_process_response_queue(struct rsp_que *rsp)
2766 {
2767 struct scsi_qla_host *vha;
2768 struct qla_hw_data *ha = rsp->hw;
2769 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2770 sts_entry_t *pkt;
2771
2772 vha = pci_get_drvdata(ha->pdev);
2773
2774 if (!vha->flags.online)
2775 return;
2776
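	/* Walk the ring until we reach an entry already marked RESPONSE_PROCESSED. */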
2777 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2778 pkt = (sts_entry_t *)rsp->ring_ptr;
2779
2780 rsp->ring_index++;
2781 if (rsp->ring_index == rsp->length) {
2782 rsp->ring_index = 0;
2783 rsp->ring_ptr = rsp->ring;
2784 } else {
2785 rsp->ring_ptr++;
2786 }
2787
2788 if (pkt->entry_status != 0) {
2789 qla2x00_error_entry(vha, rsp, pkt);
2790 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2791 wmb();
2792 continue;
2793 }
2794
2795 qla2x00_process_response_entry(vha, rsp, pkt);
2796 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2797 wmb();
2798 }
2799
2800 /* Adjust ring index */
2801 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2802 }
2803
2804 static inline void
2805 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2806 uint32_t sense_len, struct rsp_que *rsp, int res)
2807 {
2808 struct scsi_qla_host *vha = sp->vha;
2809 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2810 uint32_t track_sense_len;
2811
2812 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2813 sense_len = SCSI_SENSE_BUFFERSIZE;
2814
2815 SET_CMD_SENSE_LEN(sp, sense_len);
2816 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2817 track_sense_len = sense_len;
2818
2819 if (sense_len > par_sense_len)
2820 sense_len = par_sense_len;
2821
2822 memcpy(cp->sense_buffer, sense_data, sense_len);
2823
2824 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2825 track_sense_len -= sense_len;
2826 SET_CMD_SENSE_LEN(sp, track_sense_len);
2827
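	/*
	 * More sense data than fits in this entry: stash the SRB so that
	 * status-continuation entries can append the remainder.
	 */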
2828 if (track_sense_len != 0) {
2829 rsp->status_srb = sp;
2830 cp->result = res;
2831 }
2832
2833 if (sense_len) {
2834 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2835 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2836 sp->vha->host_no, cp->device->id, cp->device->lun,
2837 cp);
2838 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2839 cp->sense_buffer, sense_len);
2840 }
2841 }
2842
2843 struct scsi_dif_tuple {
2844 __be16 guard; /* Checksum */
2845 __be16 app_tag; /* APPL identifier */
2846 __be32 ref_tag; /* Target LBA or indirect LBA */
2847 };
2848
2849 /*
2850 * Check the guard or meta-data for the type of error
2851 * detected by the HBA. On error, set the ASC/ASCQ fields
2852 * in the sense buffer to ILLEGAL_REQUEST to indicate to
2853 * the kernel that the HBA detected the error.
2854 */
2855 static inline int
2856 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2857 {
2858 struct scsi_qla_host *vha = sp->vha;
2859 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2860 uint8_t *ap = &sts24->data[12];
2861 uint8_t *ep = &sts24->data[20];
2862 uint32_t e_ref_tag, a_ref_tag;
2863 uint16_t e_app_tag, a_app_tag;
2864 uint16_t e_guard, a_guard;
2865
2866 /*
2867 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
2868 * would make guard field appear at offset 2
2869 */
2870 a_guard = get_unaligned_le16(ap + 2);
2871 a_app_tag = get_unaligned_le16(ap + 0);
2872 a_ref_tag = get_unaligned_le32(ap + 4);
2873 e_guard = get_unaligned_le16(ep + 2);
2874 e_app_tag = get_unaligned_le16(ep + 0);
2875 e_ref_tag = get_unaligned_le32(ep + 4);
2876
2877 ql_dbg(ql_dbg_io, vha, 0x3023,
2878 "iocb(s) %p Returned STATUS.\n", sts24);
2879
2880 ql_dbg(ql_dbg_io, vha, 0x3024,
2881 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2882 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2883 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2884 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2885 a_app_tag, e_app_tag, a_guard, e_guard);
2886
2887 /*
2888 * Ignore sector if:
2889 * For type 3: ref & app tag is all 'f's
2890 * For type 0,1,2: app tag is all 'f's
2891 */
2892 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2893 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2894 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2895 uint32_t blocks_done, resid;
2896 sector_t lba_s = scsi_get_lba(cmd);
2897
2898 /* 2TB boundary case covered automatically with this */
2899 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2900
2901 resid = scsi_bufflen(cmd) - (blocks_done *
2902 cmd->device->sector_size);
2903
2904 scsi_set_resid(cmd, resid);
2905 cmd->result = DID_OK << 16;
2906
2907 /* Update protection tag */
2908 if (scsi_prot_sg_count(cmd)) {
2909 uint32_t i, j = 0, k = 0, num_ent;
2910 struct scatterlist *sg;
2911 struct t10_pi_tuple *spt;
2912
2913 /* Patch the corresponding protection tags */
2914 scsi_for_each_prot_sg(cmd, sg,
2915 scsi_prot_sg_count(cmd), i) {
2916 num_ent = sg_dma_len(sg) / 8;
2917 if (k + num_ent < blocks_done) {
2918 k += num_ent;
2919 continue;
2920 }
2921 j = blocks_done - k - 1;
2922 k = blocks_done;
2923 break;
2924 }
2925
2926 if (k != blocks_done) {
2927 ql_log(ql_log_warn, vha, 0x302f,
2928 "unexpected tag values tag:lba=%x:%llx)\n",
2929 e_ref_tag, (unsigned long long)lba_s);
2930 return 1;
2931 }
2932
2933 spt = page_address(sg_page(sg)) + sg->offset;
2934 spt += j;
2935
2936 spt->app_tag = T10_PI_APP_ESCAPE;
2937 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2938 spt->ref_tag = T10_PI_REF_ESCAPE;
2939 }
2940
2941 return 0;
2942 }
2943
2944 /* check guard */
2945 if (e_guard != a_guard) {
2946 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2947 set_host_byte(cmd, DID_ABORT);
2948 return 1;
2949 }
2950
2951 /* check ref tag */
2952 if (e_ref_tag != a_ref_tag) {
2953 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2954 set_host_byte(cmd, DID_ABORT);
2955 return 1;
2956 }
2957
2958 /* check appl tag */
2959 if (e_app_tag != a_app_tag) {
2960 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2961 set_host_byte(cmd, DID_ABORT);
2962 return 1;
2963 }
2964
2965 return 1;
2966 }
2967
2968 static void
2969 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2970 struct req_que *req, uint32_t index)
2971 {
2972 struct qla_hw_data *ha = vha->hw;
2973 srb_t *sp;
2974 uint16_t comp_status;
2975 uint16_t scsi_status;
2976 uint16_t thread_id;
2977 uint32_t rval = EXT_STATUS_OK;
2978 struct bsg_job *bsg_job = NULL;
2979 struct fc_bsg_request *bsg_request;
2980 struct fc_bsg_reply *bsg_reply;
2981 sts_entry_t *sts = pkt;
2982 struct sts_entry_24xx *sts24 = pkt;
2983
2984 /* Validate handle. */
2985 if (index >= req->num_outstanding_cmds) {
2986 ql_log(ql_log_warn, vha, 0x70af,
2987 "Invalid SCSI completion handle 0x%x.\n", index);
2988 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2989 return;
2990 }
2991
2992 sp = req->outstanding_cmds[index];
2993 if (!sp) {
2994 ql_log(ql_log_warn, vha, 0x70b0,
2995 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2996 req->id, index);
2997
2998 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2999 return;
3000 }
3001
3002 /* Free outstanding command slot. */
3003 req->outstanding_cmds[index] = NULL;
3004 bsg_job = sp->u.bsg_job;
3005 bsg_request = bsg_job->request;
3006 bsg_reply = bsg_job->reply;
3007
3008 if (IS_FWI2_CAPABLE(ha)) {
3009 comp_status = le16_to_cpu(sts24->comp_status);
3010 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3011 } else {
3012 comp_status = le16_to_cpu(sts->comp_status);
3013 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3014 }
3015
3016 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3017 switch (comp_status) {
3018 case CS_COMPLETE:
3019 if (scsi_status == 0) {
3020 bsg_reply->reply_payload_rcv_len =
3021 bsg_job->reply_payload.payload_len;
3022 vha->qla_stats.input_bytes +=
3023 bsg_reply->reply_payload_rcv_len;
3024 vha->qla_stats.input_requests++;
3025 rval = EXT_STATUS_OK;
3026 }
3027 goto done;
3028
3029 case CS_DATA_OVERRUN:
3030 ql_dbg(ql_dbg_user, vha, 0x70b1,
3031 "Command completed with data overrun thread_id=%d\n",
3032 thread_id);
3033 rval = EXT_STATUS_DATA_OVERRUN;
3034 break;
3035
3036 case CS_DATA_UNDERRUN:
3037 ql_dbg(ql_dbg_user, vha, 0x70b2,
3038 "Command completed with data underrun thread_id=%d\n",
3039 thread_id);
3040 rval = EXT_STATUS_DATA_UNDERRUN;
3041 break;
3042 case CS_BIDIR_RD_OVERRUN:
3043 ql_dbg(ql_dbg_user, vha, 0x70b3,
3044 "Command completed with read data overrun thread_id=%d\n",
3045 thread_id);
3046 rval = EXT_STATUS_DATA_OVERRUN;
3047 break;
3048
3049 case CS_BIDIR_RD_WR_OVERRUN:
3050 ql_dbg(ql_dbg_user, vha, 0x70b4,
3051 "Command completed with read and write data overrun "
3052 "thread_id=%d\n", thread_id);
3053 rval = EXT_STATUS_DATA_OVERRUN;
3054 break;
3055
3056 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
3057 ql_dbg(ql_dbg_user, vha, 0x70b5,
3058 "Command completed with read data over and write data "
3059 "underrun thread_id=%d\n", thread_id);
3060 rval = EXT_STATUS_DATA_OVERRUN;
3061 break;
3062
3063 case CS_BIDIR_RD_UNDERRUN:
3064 ql_dbg(ql_dbg_user, vha, 0x70b6,
3065 "Command completed with read data underrun "
3066 "thread_id=%d\n", thread_id);
3067 rval = EXT_STATUS_DATA_UNDERRUN;
3068 break;
3069
3070 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
3071 ql_dbg(ql_dbg_user, vha, 0x70b7,
3072 "Command completed with read data under and write data "
3073 "overrun thread_id=%d\n", thread_id);
3074 rval = EXT_STATUS_DATA_UNDERRUN;
3075 break;
3076
3077 case CS_BIDIR_RD_WR_UNDERRUN:
3078 ql_dbg(ql_dbg_user, vha, 0x70b8,
3079 "Command completed with read and write data underrun "
3080 "thread_id=%d\n", thread_id);
3081 rval = EXT_STATUS_DATA_UNDERRUN;
3082 break;
3083
3084 case CS_BIDIR_DMA:
3085 ql_dbg(ql_dbg_user, vha, 0x70b9,
3086 "Command completed with data DMA error thread_id=%d\n",
3087 thread_id);
3088 rval = EXT_STATUS_DMA_ERR;
3089 break;
3090
3091 case CS_TIMEOUT:
3092 ql_dbg(ql_dbg_user, vha, 0x70ba,
3093 "Command completed with timeout thread_id=%d\n",
3094 thread_id);
3095 rval = EXT_STATUS_TIMEOUT;
3096 break;
3097 default:
3098 ql_dbg(ql_dbg_user, vha, 0x70bb,
3099 "Command completed with completion status=0x%x "
3100 "thread_id=%d\n", comp_status, thread_id);
3101 rval = EXT_STATUS_ERR;
3102 break;
3103 }
3104 bsg_reply->reply_payload_rcv_len = 0;
3105
3106 done:
3107 /* Return the vendor specific reply to API */
3108 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
3109 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3110 	/* Always return DID_OK; bsg will send the vendor-specific
3111 	 * response in this case only. */
3112 sp->done(sp, DID_OK << 16);
3113
3114 }
3115
3116 /**
3117 * qla2x00_status_entry() - Process a Status IOCB entry.
3118 * @vha: SCSI driver HA context
3119 * @rsp: response queue
3120 * @pkt: Entry pointer
3121 */
3122 static void
3123 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3124 {
3125 srb_t *sp;
3126 fc_port_t *fcport;
3127 struct scsi_cmnd *cp;
3128 sts_entry_t *sts = pkt;
3129 struct sts_entry_24xx *sts24 = pkt;
3130 uint16_t comp_status;
3131 uint16_t scsi_status;
3132 uint16_t ox_id;
3133 uint8_t lscsi_status;
3134 int32_t resid;
3135 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
3136 fw_resid_len;
3137 uint8_t *rsp_info, *sense_data;
3138 struct qla_hw_data *ha = vha->hw;
3139 uint32_t handle;
3140 uint16_t que;
3141 struct req_que *req;
3142 int logit = 1;
3143 int res = 0;
3144 uint16_t state_flags = 0;
3145 uint16_t sts_qual = 0;
3146
3147 if (IS_FWI2_CAPABLE(ha)) {
3148 comp_status = le16_to_cpu(sts24->comp_status);
3149 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3150 state_flags = le16_to_cpu(sts24->state_flags);
3151 } else {
3152 comp_status = le16_to_cpu(sts->comp_status);
3153 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3154 }
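	/*
	 * The completion handle packs the request queue id in the high word
	 * and the outstanding-command index in the low word.
	 */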
3155 handle = (uint32_t) LSW(sts->handle);
3156 que = MSW(sts->handle);
3157 req = ha->req_q_map[que];
3158
3159 /* Check for invalid queue pointer */
3160 if (req == NULL ||
3161 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3162 ql_dbg(ql_dbg_io, vha, 0x3059,
3163 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3164 "que=%u.\n", sts->handle, req, que);
3165 return;
3166 }
3167
3168 /* Validate handle. */
3169 if (handle < req->num_outstanding_cmds) {
3170 sp = req->outstanding_cmds[handle];
3171 if (!sp) {
3172 ql_dbg(ql_dbg_io, vha, 0x3075,
3173 "%s(%ld): Already returned command for status handle (0x%x).\n",
3174 __func__, vha->host_no, sts->handle);
3175 return;
3176 }
3177 } else {
3178 ql_dbg(ql_dbg_io, vha, 0x3017,
3179 "Invalid status handle, out of range (0x%x).\n",
3180 sts->handle);
3181
3182 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3183 if (IS_P3P_TYPE(ha))
3184 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3185 else
3186 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3187 qla2xxx_wake_dpc(vha);
3188 }
3189 return;
3190 }
3191 qla_put_iocbs(sp->qpair, &sp->iores);
3192
3193 if (sp->cmd_type != TYPE_SRB) {
3194 req->outstanding_cmds[handle] = NULL;
3195 ql_dbg(ql_dbg_io, vha, 0x3015,
3196 "Unknown sp->cmd_type %x %p).\n",
3197 sp->cmd_type, sp);
3198 return;
3199 }
3200
3201 /* NVME completion. */
3202 if (sp->type == SRB_NVME_CMD) {
3203 req->outstanding_cmds[handle] = NULL;
3204 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3205 return;
3206 }
3207
3208 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3209 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
3210 return;
3211 }
3212
3213 /* Task Management completion. */
3214 if (sp->type == SRB_TM_CMD) {
3215 qla24xx_tm_iocb_entry(vha, req, pkt);
3216 return;
3217 }
3218
3219 /* Fast path completion. */
3220 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3221 sp->qpair->cmd_completion_cnt++;
3222
3223 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3224 qla2x00_process_completed_request(vha, req, handle);
3225
3226 return;
3227 }
3228
3229 req->outstanding_cmds[handle] = NULL;
3230 cp = GET_CMD_SP(sp);
3231 if (cp == NULL) {
3232 ql_dbg(ql_dbg_io, vha, 0x3018,
3233 "Command already returned (0x%x/%p).\n",
3234 sts->handle, sp);
3235
3236 return;
3237 }
3238
3239 lscsi_status = scsi_status & STATUS_MASK;
3240
3241 fcport = sp->fcport;
3242
3243 ox_id = 0;
3244 sense_len = par_sense_len = rsp_info_len = resid_len =
3245 fw_resid_len = 0;
3246 if (IS_FWI2_CAPABLE(ha)) {
3247 if (scsi_status & SS_SENSE_LEN_VALID)
3248 sense_len = le32_to_cpu(sts24->sense_len);
3249 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3250 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3251 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3252 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3253 if (comp_status == CS_DATA_UNDERRUN)
3254 fw_resid_len = le32_to_cpu(sts24->residual_len);
3255 rsp_info = sts24->data;
3256 sense_data = sts24->data;
3257 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3258 ox_id = le16_to_cpu(sts24->ox_id);
3259 par_sense_len = sizeof(sts24->data);
3260 sts_qual = le16_to_cpu(sts24->status_qualifier);
3261 } else {
3262 if (scsi_status & SS_SENSE_LEN_VALID)
3263 sense_len = le16_to_cpu(sts->req_sense_length);
3264 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3265 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3266 resid_len = le32_to_cpu(sts->residual_length);
3267 rsp_info = sts->rsp_info;
3268 sense_data = sts->req_sense_data;
3269 par_sense_len = sizeof(sts->req_sense_data);
3270 }
3271
3272 /* Check for any FCP transport errors. */
3273 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3274 /* Sense data lies beyond any FCP RESPONSE data. */
3275 if (IS_FWI2_CAPABLE(ha)) {
3276 sense_data += rsp_info_len;
3277 par_sense_len -= rsp_info_len;
3278 }
3279 if (rsp_info_len > 3 && rsp_info[3]) {
3280 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3281 "FCP I/O protocol failure (0x%x/0x%x).\n",
3282 rsp_info_len, rsp_info[3]);
3283
3284 res = DID_BUS_BUSY << 16;
3285 goto out;
3286 }
3287 }
3288
3289 /* Check for overrun. */
3290 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3291 scsi_status & SS_RESIDUAL_OVER)
3292 comp_status = CS_DATA_OVERRUN;
3293
3294 /*
3295 * Check retry_delay_timer value if we receive a busy or
3296 * queue full.
3297 */
3298 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3299 lscsi_status == SAM_STAT_BUSY))
3300 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3301
3302 	/*
3303 	 * Based on the host and SCSI status, generate the status code for Linux.
3304 	 */
3305 switch (comp_status) {
3306 case CS_COMPLETE:
3307 case CS_QUEUE_FULL:
3308 if (scsi_status == 0) {
3309 res = DID_OK << 16;
3310 break;
3311 }
3312 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3313 resid = resid_len;
3314 scsi_set_resid(cp, resid);
3315
3316 if (!lscsi_status &&
3317 ((unsigned)(scsi_bufflen(cp) - resid) <
3318 cp->underflow)) {
3319 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3320 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3321 resid, scsi_bufflen(cp));
3322
3323 res = DID_ERROR << 16;
3324 break;
3325 }
3326 }
3327 res = DID_OK << 16 | lscsi_status;
3328
3329 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3330 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3331 "QUEUE FULL detected.\n");
3332 break;
3333 }
3334 logit = 0;
3335 if (lscsi_status != SS_CHECK_CONDITION)
3336 break;
3337
3338 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3339 if (!(scsi_status & SS_SENSE_LEN_VALID))
3340 break;
3341
3342 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3343 rsp, res);
3344 break;
3345
3346 case CS_DATA_UNDERRUN:
3347 /* Use F/W calculated residual length. */
3348 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3349 scsi_set_resid(cp, resid);
3350 if (scsi_status & SS_RESIDUAL_UNDER) {
3351 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3352 ql_log(ql_log_warn, fcport->vha, 0x301d,
3353 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3354 resid, scsi_bufflen(cp));
3355
3356 vha->interface_err_cnt++;
3357
3358 res = DID_ERROR << 16 | lscsi_status;
3359 goto check_scsi_status;
3360 }
3361
3362 if (!lscsi_status &&
3363 ((unsigned)(scsi_bufflen(cp) - resid) <
3364 cp->underflow)) {
3365 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3366 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3367 resid, scsi_bufflen(cp));
3368
3369 res = DID_ERROR << 16;
3370 break;
3371 }
3372 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3373 lscsi_status != SAM_STAT_BUSY) {
3374 			/*
3375 			 * A SCSI status of TASK SET FULL or BUSY is treated
3376 			 * as task not completed.
3377 			 */
3378
3379 ql_log(ql_log_warn, fcport->vha, 0x301f,
3380 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3381 resid, scsi_bufflen(cp));
3382
3383 vha->interface_err_cnt++;
3384
3385 res = DID_ERROR << 16 | lscsi_status;
3386 goto check_scsi_status;
3387 } else {
3388 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3389 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3390 scsi_status, lscsi_status);
3391 }
3392
3393 res = DID_OK << 16 | lscsi_status;
3394 logit = 0;
3395
3396 check_scsi_status:
3397 /*
3398 * Check to see if SCSI Status is non zero. If so report SCSI
3399 * Status.
3400 */
3401 if (lscsi_status != 0) {
3402 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3403 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3404 "QUEUE FULL detected.\n");
3405 logit = 1;
3406 break;
3407 }
3408 if (lscsi_status != SS_CHECK_CONDITION)
3409 break;
3410
3411 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3412 if (!(scsi_status & SS_SENSE_LEN_VALID))
3413 break;
3414
3415 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3416 sense_len, rsp, res);
3417 }
3418 break;
3419
3420 case CS_PORT_LOGGED_OUT:
3421 case CS_PORT_CONFIG_CHG:
3422 case CS_PORT_BUSY:
3423 case CS_INCOMPLETE:
3424 case CS_PORT_UNAVAILABLE:
3425 case CS_TIMEOUT:
3426 case CS_RESET:
3427 case CS_EDIF_INV_REQ:
3428
3429 		/*
3430 		 * The FC class is going to block the rport while we try to
3431 		 * recover, so instruct the mid layer to requeue until the
3432 		 * class decides how to handle this.
3433 		 */
3434 res = DID_TRANSPORT_DISRUPTED << 16;
3435
3436 if (comp_status == CS_TIMEOUT) {
3437 if (IS_FWI2_CAPABLE(ha))
3438 break;
3439 else if ((le16_to_cpu(sts->status_flags) &
3440 SF_LOGOUT_SENT) == 0)
3441 break;
3442 }
3443
3444 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3445 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3446 "Port to be marked lost on fcport=%02x%02x%02x, current "
3447 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3448 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3449 port_state_str[FCS_ONLINE],
3450 comp_status);
3451
3452 qlt_schedule_sess_for_deletion(fcport);
3453 }
3454
3455 break;
3456
3457 case CS_ABORTED:
3458 res = DID_RESET << 16;
3459 break;
3460
3461 case CS_DIF_ERROR:
3462 logit = qla2x00_handle_dif_error(sp, sts24);
3463 res = cp->result;
3464 break;
3465
3466 case CS_TRANSPORT:
3467 res = DID_ERROR << 16;
3468 vha->hw_err_cnt++;
3469
3470 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3471 break;
3472
3473 if (state_flags & BIT_4)
3474 scmd_printk(KERN_WARNING, cp,
3475 "Unsupported device '%s' found.\n",
3476 cp->device->vendor);
3477 break;
3478
3479 case CS_DMA:
3480 ql_log(ql_log_info, fcport->vha, 0x3022,
3481 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3482 comp_status, scsi_status, res, vha->host_no,
3483 cp->device->id, cp->device->lun, fcport->d_id.b24,
3484 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3485 resid_len, fw_resid_len, sp, cp);
3486 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3487 pkt, sizeof(*sts24));
3488 res = DID_ERROR << 16;
3489 vha->hw_err_cnt++;
3490 break;
3491 default:
3492 res = DID_ERROR << 16;
3493 break;
3494 }
3495
3496 out:
3497 if (logit)
3498 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
3499 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3500 comp_status, scsi_status, res, vha->host_no,
3501 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3502 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3503 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3504 resid_len, fw_resid_len, sp, cp);
3505
3506 if (rsp->status_srb == NULL)
3507 sp->done(sp, res);
3508 }
3509
3510 /**
3511 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3512 * @rsp: response queue
3513 * @pkt: Entry pointer
3514 *
3515 * Extended sense data.
3516 */
3517 static void
3518 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3519 {
3520 uint8_t sense_sz = 0;
3521 struct qla_hw_data *ha = rsp->hw;
3522 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3523 srb_t *sp = rsp->status_srb;
3524 struct scsi_cmnd *cp;
3525 uint32_t sense_len;
3526 uint8_t *sense_ptr;
3527
3528 if (!sp || !GET_CMD_SENSE_LEN(sp))
3529 return;
3530
3531 sense_len = GET_CMD_SENSE_LEN(sp);
3532 sense_ptr = GET_CMD_SENSE_PTR(sp);
3533
3534 cp = GET_CMD_SP(sp);
3535 if (cp == NULL) {
3536 ql_log(ql_log_warn, vha, 0x3025,
3537 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3538
3539 rsp->status_srb = NULL;
3540 return;
3541 }
3542
3543 if (sense_len > sizeof(pkt->data))
3544 sense_sz = sizeof(pkt->data);
3545 else
3546 sense_sz = sense_len;
3547
3548 /* Move sense data. */
3549 if (IS_FWI2_CAPABLE(ha))
3550 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3551 memcpy(sense_ptr, pkt->data, sense_sz);
3552 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3553 sense_ptr, sense_sz);
3554
3555 sense_len -= sense_sz;
3556 sense_ptr += sense_sz;
3557
3558 SET_CMD_SENSE_PTR(sp, sense_ptr);
3559 SET_CMD_SENSE_LEN(sp, sense_len);
3560
3561 /* Place command on done queue. */
3562 if (sense_len == 0) {
3563 rsp->status_srb = NULL;
3564 sp->done(sp, cp->result);
3565 }
3566 }
3567
3568 /**
3569 * qla2x00_error_entry() - Process an error entry.
3570 * @vha: SCSI driver HA context
3571 * @rsp: response queue
3572 * @pkt: Entry pointer
3573 * Return: 1=allow further error analysis, 0=no additional error analysis.
3574 */
3575 static int
3576 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3577 {
3578 srb_t *sp;
3579 struct qla_hw_data *ha = vha->hw;
3580 const char func[] = "ERROR-IOCB";
3581 uint16_t que = MSW(pkt->handle);
3582 struct req_que *req = NULL;
3583 int res = DID_ERROR << 16;
3584
3585 ql_dbg(ql_dbg_async, vha, 0x502a,
3586 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3587 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3588
3589 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3590 goto fatal;
3591
3592 req = ha->req_q_map[que];
3593
3594 if (pkt->entry_status & RF_BUSY)
3595 res = DID_BUS_BUSY << 16;
3596
3597 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3598 return 0;
3599
3600 switch (pkt->entry_type) {
3601 case NOTIFY_ACK_TYPE:
3602 case STATUS_TYPE:
3603 case STATUS_CONT_TYPE:
3604 case LOGINOUT_PORT_IOCB_TYPE:
3605 case CT_IOCB_TYPE:
3606 case ELS_IOCB_TYPE:
3607 case ABORT_IOCB_TYPE:
3608 case MBX_IOCB_TYPE:
3609 default:
3610 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3611 if (sp) {
3612 qla_put_iocbs(sp->qpair, &sp->iores);
3613 sp->done(sp, res);
3614 return 0;
3615 }
3616 break;
3617
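	/* These entry types are left to the caller for further error analysis (return 1). */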
3618 case SA_UPDATE_IOCB_TYPE:
3619 case ABTS_RESP_24XX:
3620 case CTIO_TYPE7:
3621 case CTIO_CRC2:
3622 return 1;
3623 }
3624 fatal:
3625 ql_log(ql_log_warn, vha, 0x5030,
3626 "Error entry - invalid handle/queue (%04x).\n", que);
3627 return 0;
3628 }
3629
3630 /**
3631 * qla24xx_mbx_completion() - Process mailbox command completions.
3632 * @vha: SCSI driver HA context
3633 * @mb0: Mailbox0 register
3634 */
3635 static void
3636 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3637 {
3638 uint16_t cnt;
3639 uint32_t mboxes;
3640 __le16 __iomem *wptr;
3641 struct qla_hw_data *ha = vha->hw;
3642 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3643
3644 /* Read all mbox registers? */
3645 WARN_ON_ONCE(ha->mbx_count > 32);
3646 mboxes = (1ULL << ha->mbx_count) - 1;
3647 if (!ha->mcp)
3648 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3649 else
3650 mboxes = ha->mcp->in_mb;
3651
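	/* mboxes is a bitmask: bit n set means mailbox register n is read back below. */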
3652 /* Load return mailbox registers. */
3653 ha->flags.mbox_int = 1;
3654 ha->mailbox_out[0] = mb0;
3655 mboxes >>= 1;
3656 	wptr = &reg->mailbox1;
3657
3658 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3659 if (mboxes & BIT_0)
3660 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3661
3662 mboxes >>= 1;
3663 wptr++;
3664 }
3665 }
3666
3667 static void
3668 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3669 struct abort_entry_24xx *pkt)
3670 {
3671 const char func[] = "ABT_IOCB";
3672 srb_t *sp;
3673 srb_t *orig_sp = NULL;
3674 struct srb_iocb *abt;
3675
3676 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3677 if (!sp)
3678 return;
3679
3680 abt = &sp->u.iocb_cmd;
3681 abt->u.abt.comp_status = pkt->comp_status;
3682 orig_sp = sp->cmd_sp;
3683 /* Pass the original command sp so the NVMe layer can act on the abort completion status */
3684 if (orig_sp)
3685 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3686
3687 sp->done(sp, 0);
3688 }
3689
3690 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3691 struct pt_ls4_request *pkt, struct req_que *req)
3692 {
3693 srb_t *sp;
3694 const char func[] = "LS4_IOCB";
3695 uint16_t comp_status;
3696
3697 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3698 if (!sp)
3699 return;
3700
3701 comp_status = le16_to_cpu(pkt->status);
3702 sp->done(sp, comp_status);
3703 }
3704
3705 /**
3706 * qla_chk_cont_iocb_avail - check that all continuation IOCBs are available
3707 *    before IOCB processing can start.
3708 * @vha: host adapter pointer
3709 * @rsp: response queue
3710 * @pkt: head IOCB describing how many continuation IOCBs follow
3711 * Return: 0 if all IOCBs have arrived, -EIO if they have not.
3712 */
3713 static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3714 struct rsp_que *rsp, response_t *pkt)
3715 {
3716 int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
3717 response_t *end_pkt;
3718 int rc = 0;
3719 u32 rsp_q_in;
3720
3721 if (pkt->entry_count == 1)
3722 return rc;
3723
3724 /* ring_index was pre-incremented; set it back to the current pkt */
3725 if (rsp->ring_index == 0)
3726 start_pkt_ring_index = rsp->length - 1;
3727 else
3728 start_pkt_ring_index = rsp->ring_index - 1;
3729
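/*
 * Ring index of the last IOCB belonging to this multi-entry packet,
 * accounting for a wrap past the end of the response ring.
 */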
3730 if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
3731 end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
3732 rsp->length - 1;
3733 else
3734 end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
3735
3736 end_pkt = rsp->ring + end_pkt_ring_index;
3737
3738 /* next pkt = end_pkt + 1 */
3739 n_ring_index = end_pkt_ring_index + 1;
3740 if (n_ring_index >= rsp->length)
3741 n_ring_index = 0;
3742
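/*
 * Snapshot the firmware's response-queue in-pointer: from the shadow
 * copy in memory when shadow registers are enabled, otherwise from
 * the register itself.
 */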
3743 rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
3744 rd_reg_dword(rsp->rsp_q_in);
3745
3746 /* rsp_q_in is either wrapped or pointing beyond endpkt */
3747 if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
3748 rsp_q_in >= n_ring_index)
3749 /* all IOCBs arrived. */
3750 rc = 0;
3751 else
3752 rc = -EIO;
3753
3754 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
3755 "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
3756 __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
3757 rsp_q_in, rc);
3758
3759 return rc;
3760 }
3761
3762 /**
3763 * qla24xx_process_response_queue() - Process response queue entries.
3764 * @vha: SCSI driver HA context
3765 * @rsp: response queue
3766 */
3767 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3768 struct rsp_que *rsp)
3769 {
3770 struct sts_entry_24xx *pkt;
3771 struct qla_hw_data *ha = vha->hw;
3772 struct purex_entry_24xx *purex_entry;
3773 struct purex_item *pure_item;
3774
3775 if (!ha->flags.fw_started)
3776 return;
3777
3778 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3779 rsp->qpair->rcv_intr = 1;
3780 qla_cpu_update(rsp->qpair, smp_processor_id());
3781 }
3782
3783 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3784 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3785
3786 rsp->ring_index++;
3787 if (rsp->ring_index == rsp->length) {
3788 rsp->ring_index = 0;
3789 rsp->ring_ptr = rsp->ring;
3790 } else {
3791 rsp->ring_ptr++;
3792 }
3793
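/*
 * Entries flagged with an error status are screened first;
 * qla2x00_error_entry() completes them unless they are target-mode
 * IOCB types, which fall through to the type switch below.
 */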
3794 if (pkt->entry_status != 0) {
3795 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3796 goto process_err;
3797
3798 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3799 wmb();
3800 continue;
3801 }
3802 process_err:
3803
3804 switch (pkt->entry_type) {
3805 case STATUS_TYPE:
3806 qla2x00_status_entry(vha, rsp, pkt);
3807 break;
3808 case STATUS_CONT_TYPE:
3809 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3810 break;
3811 case VP_RPT_ID_IOCB_TYPE:
3812 qla24xx_report_id_acquisition(vha,
3813 (struct vp_rpt_id_entry_24xx *)pkt);
3814 break;
3815 case LOGINOUT_PORT_IOCB_TYPE:
3816 qla24xx_logio_entry(vha, rsp->req,
3817 (struct logio_entry_24xx *)pkt);
3818 break;
3819 case CT_IOCB_TYPE:
3820 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3821 break;
3822 case ELS_IOCB_TYPE:
3823 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3824 break;
3825 case ABTS_RECV_24XX:
3826 if (qla_ini_mode_enabled(vha)) {
3827 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3828 if (!pure_item)
3829 break;
3830 qla24xx_queue_purex_item(vha, pure_item,
3831 qla24xx_process_abts);
3832 break;
3833 }
3834 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3835 IS_QLA28XX(ha)) {
3836 /* ensure that the ATIO queue is empty */
3837 qlt_handle_abts_recv(vha, rsp,
3838 (response_t *)pkt);
3839 break;
3840 } else {
3841 qlt_24xx_process_atio_queue(vha, 1);
3842 }
3843 fallthrough;
3844 case ABTS_RESP_24XX:
3845 case CTIO_TYPE7:
3846 case CTIO_CRC2:
3847 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3848 break;
3849 case PT_LS4_REQUEST:
3850 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3851 rsp->req);
3852 break;
3853 case NOTIFY_ACK_TYPE:
3854 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3855 qlt_response_pkt_all_vps(vha, rsp,
3856 (response_t *)pkt);
3857 else
3858 qla24xxx_nack_iocb_entry(vha, rsp->req,
3859 (struct nack_to_isp *)pkt);
3860 break;
3861 case MARKER_TYPE:
3862 /* Do nothing in this case; this check only prevents the entry
3863 * from falling into the default case.
3864 */
3865 break;
3866 case ABORT_IOCB_TYPE:
3867 qla24xx_abort_iocb_entry(vha, rsp->req,
3868 (struct abort_entry_24xx *)pkt);
3869 break;
3870 case MBX_IOCB_TYPE:
3871 qla24xx_mbx_iocb_entry(vha, rsp->req,
3872 (struct mbx_24xx_entry *)pkt);
3873 break;
3874 case VP_CTRL_IOCB_TYPE:
3875 qla_ctrlvp_completed(vha, rsp->req,
3876 (struct vp_ctrl_entry_24xx *)pkt);
3877 break;
3878 case PUREX_IOCB_TYPE:
3879 purex_entry = (void *)pkt;
3880 switch (purex_entry->els_frame_payload[3]) {
3881 case ELS_RDP:
3882 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3883 if (!pure_item)
3884 break;
3885 qla24xx_queue_purex_item(vha, pure_item,
3886 qla24xx_process_purex_rdp);
3887 break;
3888 case ELS_FPIN:
3889 if (!vha->hw->flags.scm_enabled) {
3890 ql_log(ql_log_warn, vha, 0x5094,
3891 "SCM not active for this port\n");
3892 break;
3893 }
3894 pure_item = qla27xx_copy_fpin_pkt(vha,
3895 (void **)&pkt, &rsp);
3896 if (!pure_item)
3897 break;
3898 qla24xx_queue_purex_item(vha, pure_item,
3899 qla27xx_process_purex_fpin);
3900 break;
3901
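/*
 * AUTH ELS frames can span multiple IOCBs; defer processing until
 * every continuation entry has been posted to the response ring.
 */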
3902 case ELS_AUTH_ELS:
3903 if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
3904 ql_dbg(ql_dbg_init, vha, 0x5091,
3905 "Defer processing ELS opcode %#x...\n",
3906 purex_entry->els_frame_payload[3]);
3907 return;
3908 }
3909 qla24xx_auth_els(vha, (void **)&pkt, &rsp);
3910 break;
3911 default:
3912 ql_log(ql_log_warn, vha, 0x509c,
3913 "Discarding ELS Request opcode 0x%x\n",
3914 purex_entry->els_frame_payload[3]);
3915 }
3916 break;
3917 case SA_UPDATE_IOCB_TYPE:
3918 qla28xx_sa_update_iocb_entry(vha, rsp->req,
3919 (struct sa_update_28xx *)pkt);
3920 break;
3921
3922 default:
3923 /* Type Not Supported. */
3924 ql_dbg(ql_dbg_async, vha, 0x5042,
3925 "Received unknown response pkt type 0x%x entry status=%x.\n",
3926 pkt->entry_type, pkt->entry_status);
3927 break;
3928 }
3929 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3930 wmb();
3931 }
3932
3933 /* Adjust ring index */
3934 if (IS_P3P_TYPE(ha)) {
3935 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3936
3937 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3938 } else {
3939 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3940 }
3941 }
3942
3943 static void
3944 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3945 {
3946 int rval;
3947 uint32_t cnt;
3948 struct qla_hw_data *ha = vha->hw;
3949 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3950
3951 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3952 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3953 return;
3954
3955 rval = QLA_SUCCESS;
3956 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3957 rd_reg_dword(&reg->iobase_addr);
3958 wrt_reg_dword(&reg->iobase_window, 0x0001);
3959 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3960 rval == QLA_SUCCESS; cnt--) {
3961 if (cnt) {
3962 wrt_reg_dword(&reg->iobase_window, 0x0001);
3963 udelay(10);
3964 } else
3965 rval = QLA_FUNCTION_TIMEOUT;
3966 }
3967 if (rval == QLA_SUCCESS)
3968 goto next_test;
3969
3970 rval = QLA_SUCCESS;
3971 wrt_reg_dword(&reg->iobase_window, 0x0003);
3972 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3973 rval == QLA_SUCCESS; cnt--) {
3974 if (cnt) {
3975 wrt_reg_dword(&reg->iobase_window, 0x0003);
3976 udelay(10);
3977 } else
3978 rval = QLA_FUNCTION_TIMEOUT;
3979 }
3980 if (rval != QLA_SUCCESS)
3981 goto done;
3982
3983 next_test:
3984 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3985 ql_log(ql_log_info, vha, 0x504c,
3986 "Additional code -- 0x55AA.\n");
3987
3988 done:
3989 wrt_reg_dword(&reg->iobase_window, 0x0000);
3990 rd_reg_dword(&reg->iobase_window);
3991 }
3992
3993 /**
3994 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3995 * @irq: interrupt number
3996 * @dev_id: SCSI driver HA context
3997 *
3998 * Called by the system whenever the host adapter generates an interrupt.
3999 *
4000 * Returns handled flag.
4001 */
4002 irqreturn_t
4003 qla24xx_intr_handler(int irq, void *dev_id)
4004 {
4005 scsi_qla_host_t *vha;
4006 struct qla_hw_data *ha;
4007 struct device_reg_24xx __iomem *reg;
4008 int status;
4009 unsigned long iter;
4010 uint32_t stat;
4011 uint32_t hccr;
4012 uint16_t mb[8];
4013 struct rsp_que *rsp;
4014 unsigned long flags;
4015 bool process_atio = false;
4016
4017 rsp = (struct rsp_que *) dev_id;
4018 if (!rsp) {
4019 ql_log(ql_log_info, NULL, 0x5059,
4020 "%s: NULL response queue pointer.\n", __func__);
4021 return IRQ_NONE;
4022 }
4023
4024 ha = rsp->hw;
4025 reg = &ha->iobase->isp24;
4026 status = 0;
4027
4028 if (unlikely(pci_channel_offline(ha->pdev)))
4029 return IRQ_HANDLED;
4030
4031 spin_lock_irqsave(&ha->hardware_lock, flags);
4032 vha = pci_get_drvdata(ha->pdev);
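/* Service at most 50 interrupt events per invocation to bound time in hard IRQ context. */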
4033 for (iter = 50; iter--; ) {
4034 stat = rd_reg_dword(&reg->host_status);
4035 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4036 break;
4037 if (stat & HSRX_RISC_PAUSED) {
4038 if (unlikely(pci_channel_offline(ha->pdev)))
4039 break;
4040
4041 hccr = rd_reg_dword(&reg->hccr);
4042
4043 ql_log(ql_log_warn, vha, 0x504b,
4044 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4045 hccr);
4046
4047 qla2xxx_check_risc_status(vha);
4048
4049 ha->isp_ops->fw_dump(vha);
4050 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4051 break;
4052 } else if ((stat & HSRX_RISC_INT) == 0)
4053 break;
4054
4055 switch (stat & 0xff) {
4056 case INTR_ROM_MB_SUCCESS:
4057 case INTR_ROM_MB_FAILED:
4058 case INTR_MB_SUCCESS:
4059 case INTR_MB_FAILED:
4060 qla24xx_mbx_completion(vha, MSW(stat));
4061 status |= MBX_INTERRUPT;
4062
4063 break;
4064 case INTR_ASYNC_EVENT:
4065 mb[0] = MSW(stat);
4066 mb[1] = rd_reg_word(&reg->mailbox1);
4067 mb[2] = rd_reg_word(&reg->mailbox2);
4068 mb[3] = rd_reg_word(&reg->mailbox3);
4069 qla2x00_async_event(vha, rsp, mb);
4070 break;
4071 case INTR_RSP_QUE_UPDATE:
4072 case INTR_RSP_QUE_UPDATE_83XX:
4073 qla24xx_process_response_queue(vha, rsp);
4074 break;
4075 case INTR_ATIO_QUE_UPDATE_27XX:
4076 case INTR_ATIO_QUE_UPDATE:
4077 process_atio = true;
4078 break;
4079 case INTR_ATIO_RSP_QUE_UPDATE:
4080 process_atio = true;
4081 qla24xx_process_response_queue(vha, rsp);
4082 break;
4083 default:
4084 ql_dbg(ql_dbg_async, vha, 0x504f,
4085 "Unrecognized interrupt type (%d).\n", stat & 0xff);
4086 break;
4087 }
4088 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4089 rd_reg_dword_relaxed(&reg->hccr);
4090 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4091 ndelay(3500);
4092 }
4093 qla2x00_handle_mbx_completion(ha, status);
4094 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4095
4096 if (process_atio) {
4097 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4098 qlt_24xx_process_atio_queue(vha, 0);
4099 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4100 }
4101
4102 return IRQ_HANDLED;
4103 }
4104
4105 static irqreturn_t
4106 qla24xx_msix_rsp_q(int irq, void *dev_id)
4107 {
4108 struct qla_hw_data *ha;
4109 struct rsp_que *rsp;
4110 struct device_reg_24xx __iomem *reg;
4111 struct scsi_qla_host *vha;
4112 unsigned long flags;
4113
4114 rsp = (struct rsp_que *) dev_id;
4115 if (!rsp) {
4116 ql_log(ql_log_info, NULL, 0x505a,
4117 "%s: NULL response queue pointer.\n", __func__);
4118 return IRQ_NONE;
4119 }
4120 ha = rsp->hw;
4121 reg = &ha->iobase->isp24;
4122
4123 spin_lock_irqsave(&ha->hardware_lock, flags);
4124
4125 vha = pci_get_drvdata(ha->pdev);
4126 qla24xx_process_response_queue(vha, rsp);
4127 if (!ha->flags.disable_msix_handshake) {
4128 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4129 rd_reg_dword_relaxed(&reg->hccr);
4130 }
4131 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4132
4133 return IRQ_HANDLED;
4134 }
4135
4136 static irqreturn_t
4137 qla24xx_msix_default(int irq, void *dev_id)
4138 {
4139 scsi_qla_host_t *vha;
4140 struct qla_hw_data *ha;
4141 struct rsp_que *rsp;
4142 struct device_reg_24xx __iomem *reg;
4143 int status;
4144 uint32_t stat;
4145 uint32_t hccr;
4146 uint16_t mb[8];
4147 unsigned long flags;
4148 bool process_atio = false;
4149
4150 rsp = (struct rsp_que *) dev_id;
4151 if (!rsp) {
4152 ql_log(ql_log_info, NULL, 0x505c,
4153 "%s: NULL response queue pointer.\n", __func__);
4154 return IRQ_NONE;
4155 }
4156 ha = rsp->hw;
4157 reg = &ha->iobase->isp24;
4158 status = 0;
4159
4160 spin_lock_irqsave(&ha->hardware_lock, flags);
4161 vha = pci_get_drvdata(ha->pdev);
4162 do {
4163 stat = rd_reg_dword(&reg->host_status);
4164 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4165 break;
4166 if (stat & HSRX_RISC_PAUSED) {
4167 if (unlikely(pci_channel_offline(ha->pdev)))
4168 break;
4169
4170 hccr = rd_reg_dword(&reg->hccr);
4171
4172 ql_log(ql_log_info, vha, 0x5050,
4173 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4174 hccr);
4175
4176 qla2xxx_check_risc_status(vha);
4177 vha->hw_err_cnt++;
4178
4179 ha->isp_ops->fw_dump(vha);
4180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4181 break;
4182 } else if ((stat & HSRX_RISC_INT) == 0)
4183 break;
4184
4185 switch (stat & 0xff) {
4186 case INTR_ROM_MB_SUCCESS:
4187 case INTR_ROM_MB_FAILED:
4188 case INTR_MB_SUCCESS:
4189 case INTR_MB_FAILED:
4190 qla24xx_mbx_completion(vha, MSW(stat));
4191 status |= MBX_INTERRUPT;
4192
4193 break;
4194 case INTR_ASYNC_EVENT:
4195 mb[0] = MSW(stat);
4196 mb[1] = rd_reg_word(&reg->mailbox1);
4197 mb[2] = rd_reg_word(&reg->mailbox2);
4198 mb[3] = rd_reg_word(&reg->mailbox3);
4199 qla2x00_async_event(vha, rsp, mb);
4200 break;
4201 case INTR_RSP_QUE_UPDATE:
4202 case INTR_RSP_QUE_UPDATE_83XX:
4203 qla24xx_process_response_queue(vha, rsp);
4204 break;
4205 case INTR_ATIO_QUE_UPDATE_27XX:
4206 case INTR_ATIO_QUE_UPDATE:
4207 process_atio = true;
4208 break;
4209 case INTR_ATIO_RSP_QUE_UPDATE:
4210 process_atio = true;
4211 qla24xx_process_response_queue(vha, rsp);
4212 break;
4213 default:
4214 ql_dbg(ql_dbg_async, vha, 0x5051,
4215 "Unrecognized interrupt type (%d).\n", stat & 0xff);
4216 break;
4217 }
4218 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4219 } while (0);
4220 qla2x00_handle_mbx_completion(ha, status);
4221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4222
4223 if (process_atio) {
4224 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4225 qlt_24xx_process_atio_queue(vha, 0);
4226 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4227 }
4228
4229 return IRQ_HANDLED;
4230 }
4231
4232 irqreturn_t
4233 qla2xxx_msix_rsp_q(int irq, void *dev_id)
4234 {
4235 struct qla_hw_data *ha;
4236 struct qla_qpair *qpair;
4237
4238 qpair = dev_id;
4239 if (!qpair) {
4240 ql_log(ql_log_info, NULL, 0x505b,
4241 "%s: NULL response queue pointer.\n", __func__);
4242 return IRQ_NONE;
4243 }
4244 ha = qpair->hw;
4245
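/* Defer all response-queue processing to the qpair work item on the interrupted CPU. */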
4246 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4247
4248 return IRQ_HANDLED;
4249 }
4250
4251 irqreturn_t
4252 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
4253 {
4254 struct qla_hw_data *ha;
4255 struct qla_qpair *qpair;
4256 struct device_reg_24xx __iomem *reg;
4257 unsigned long flags;
4258
4259 qpair = dev_id;
4260 if (!qpair) {
4261 ql_log(ql_log_info, NULL, 0x505b,
4262 "%s: NULL response queue pointer.\n", __func__);
4263 return IRQ_NONE;
4264 }
4265 ha = qpair->hw;
4266
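/*
 * Same as qla2xxx_msix_rsp_q(), but acknowledge the RISC interrupt
 * (interrupt handshake) under the hardware lock before deferring the work.
 */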
4267 reg = &ha->iobase->isp24;
4268 spin_lock_irqsave(&ha->hardware_lock, flags);
4269 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4270 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4271
4272 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4273
4274 return IRQ_HANDLED;
4275 }
4276
4277 /* Interrupt handling helpers. */
4278
4279 struct qla_init_msix_entry {
4280 const char *name;
4281 irq_handler_t handler;
4282 };
4283
4284 static const struct qla_init_msix_entry msix_entries[] = {
4285 { "default", qla24xx_msix_default },
4286 { "rsp_q", qla24xx_msix_rsp_q },
4287 { "atio_q", qla83xx_msix_atio_q },
4288 { "qpair_multiq", qla2xxx_msix_rsp_q },
4289 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
4290 };
4291
4292 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
4293 { "qla2xxx (default)", qla82xx_msix_default },
4294 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
4295 };
4296
4297 static int
4298 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
4299 {
4300 int i, ret;
4301 struct qla_msix_entry *qentry;
4302 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4303 int min_vecs = QLA_BASE_VECTORS;
4304 struct irq_affinity desc = {
4305 .pre_vectors = QLA_BASE_VECTORS,
4306 };
4307
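/*
 * The base vectors are excluded from affinity spreading; when target
 * mode can use a dedicated ATIO vector, reserve one more pre-assigned
 * vector for it.
 */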
4308 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4309 IS_ATIO_MSIX_CAPABLE(ha)) {
4310 desc.pre_vectors++;
4311 min_vecs++;
4312 }
4313
4314 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4315 /* user wants to control IRQ setting for target mode */
4316 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4317 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4318 PCI_IRQ_MSIX);
4319 } else
4320 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4321 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4322 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4323 &desc);
4324
4325 if (ret < 0) {
4326 ql_log(ql_log_fatal, vha, 0x00c7,
4327 "MSI-X: Failed to enable support, "
4328 "giving up -- %d/%d.\n",
4329 ha->msix_count, ret);
4330 goto msix_out;
4331 } else if (ret < ha->msix_count) {
4332 ql_log(ql_log_info, vha, 0x00c6,
4333 "MSI-X: Using %d vectors\n", ret);
4334 ha->msix_count = ret;
4335 /* Recalculate queue values */
4336 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4337 ha->max_req_queues = ha->msix_count - 1;
4338
4339 /* ATIOQ needs 1 vector. That's 1 less QPair */
4340 if (QLA_TGT_MODE_ENABLED())
4341 ha->max_req_queues--;
4342
4343 ha->max_rsp_queues = ha->max_req_queues;
4344
4345 ha->max_qpairs = ha->max_req_queues - 1;
4346 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4347 "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs);
4348 }
4349 }
4350 vha->irq_offset = desc.pre_vectors;
4351 ha->msix_entries = kcalloc(ha->msix_count,
4352 sizeof(struct qla_msix_entry),
4353 GFP_KERNEL);
4354 if (!ha->msix_entries) {
4355 ql_log(ql_log_fatal, vha, 0x00c8,
4356 "Failed to allocate memory for ha->msix_entries.\n");
4357 ret = -ENOMEM;
4358 goto free_irqs;
4359 }
4360 ha->flags.msix_enabled = 1;
4361
4362 for (i = 0; i < ha->msix_count; i++) {
4363 qentry = &ha->msix_entries[i];
4364 qentry->vector = pci_irq_vector(ha->pdev, i);
4365 qentry->entry = i;
4366 qentry->have_irq = 0;
4367 qentry->in_use = 0;
4368 qentry->handle = NULL;
4369 }
4370
4371 /* Enable MSI-X vectors for the base queue */
4372 for (i = 0; i < QLA_BASE_VECTORS; i++) {
4373 qentry = &ha->msix_entries[i];
4374 qentry->handle = rsp;
4375 rsp->msix = qentry;
4376 scnprintf(qentry->name, sizeof(qentry->name),
4377 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4378 if (IS_P3P_TYPE(ha))
4379 ret = request_irq(qentry->vector,
4380 qla82xx_msix_entries[i].handler,
4381 0, qla82xx_msix_entries[i].name, rsp);
4382 else
4383 ret = request_irq(qentry->vector,
4384 msix_entries[i].handler,
4385 0, qentry->name, rsp);
4386 if (ret)
4387 goto msix_register_fail;
4388 qentry->have_irq = 1;
4389 qentry->in_use = 1;
4390 }
4391
4392 /*
4393 * If target mode is enabled, also request the vector for the ATIO
4394 * queue.
4395 */
4396 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4397 IS_ATIO_MSIX_CAPABLE(ha)) {
4398 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4399 rsp->msix = qentry;
4400 qentry->handle = rsp;
4401 scnprintf(qentry->name, sizeof(qentry->name),
4402 "qla2xxx%lu_%s", vha->host_no,
4403 msix_entries[QLA_ATIO_VECTOR].name);
4404 qentry->in_use = 1;
4405 ret = request_irq(qentry->vector,
4406 msix_entries[QLA_ATIO_VECTOR].handler,
4407 0, qentry->name, rsp);
4408 qentry->have_irq = 1;
4409 }
4410
4411 msix_register_fail:
4412 if (ret) {
4413 ql_log(ql_log_fatal, vha, 0x00cb,
4414 "MSI-X: unable to register handler -- %x/%d.\n",
4415 qentry->vector, ret);
4416 qla2x00_free_irqs(vha);
4417 ha->mqenable = 0;
4418 goto msix_out;
4419 }
4420
4421 /* Enable MSI-X vector for response queue update for queue 0 */
4422 if (IS_MQUE_CAPABLE(ha) &&
4423 (ha->msixbase && ha->mqiobase && ha->max_qpairs))
4424 ha->mqenable = 1;
4425 else
4426 ha->mqenable = 0;
4427
4428 ql_dbg(ql_dbg_multiq, vha, 0xc005,
4429 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4430 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4431 ql_dbg(ql_dbg_init, vha, 0x0055,
4432 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4433 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4434
4435 msix_out:
4436 return ret;
4437
4438 free_irqs:
4439 pci_free_irq_vectors(ha->pdev);
4440 goto msix_out;
4441 }
4442
4443 int
4444 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4445 {
4446 int ret = QLA_FUNCTION_FAILED;
4447 device_reg_t *reg = ha->iobase;
4448 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4449
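/* Interrupt setup falls back in order: MSI-X, then MSI, then INTx. */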
4450 /* If possible, enable MSI-X. */
4451 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4452 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4453 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4454 goto skip_msi;
4455
4456 if (ql2xenablemsix == 2)
4457 goto skip_msix;
4458
4459 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4460 (ha->pdev->subsystem_device == 0x7040 ||
4461 ha->pdev->subsystem_device == 0x7041 ||
4462 ha->pdev->subsystem_device == 0x1705)) {
4463 ql_log(ql_log_warn, vha, 0x0034,
4464 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4465 ha->pdev->subsystem_vendor,
4466 ha->pdev->subsystem_device);
4467 goto skip_msi;
4468 }
4469
4470 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4471 ql_log(ql_log_warn, vha, 0x0035,
4472 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
4473 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4474 goto skip_msix;
4475 }
4476
4477 ret = qla24xx_enable_msix(ha, rsp);
4478 if (!ret) {
4479 ql_dbg(ql_dbg_init, vha, 0x0036,
4480 "MSI-X: Enabled (0x%X, 0x%X).\n",
4481 ha->chip_revision, ha->fw_attributes);
4482 goto clear_risc_ints;
4483 }
4484
4485 skip_msix:
4486
4487 ql_log(ql_log_info, vha, 0x0037,
4488 "Falling back to MSI mode -- ret=%d.\n", ret);
4489
4490 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4491 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4492 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4493 goto skip_msi;
4494
4495 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4496 if (ret > 0) {
4497 ql_dbg(ql_dbg_init, vha, 0x0038,
4498 "MSI: Enabled.\n");
4499 ha->flags.msi_enabled = 1;
4500 } else
4501 ql_log(ql_log_warn, vha, 0x0039,
4502 "Falling back to INTa mode -- ret=%d.\n", ret);
4503 skip_msi:
4504
4505 /* Skip INTx on ISP82xx. */
4506 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4507 return QLA_FUNCTION_FAILED;
4508
4509 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4510 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4511 QLA2XXX_DRIVER_NAME, rsp);
4512 if (ret) {
4513 ql_log(ql_log_warn, vha, 0x003a,
4514 "Failed to reserve interrupt %d -- already in use.\n",
4515 ha->pdev->irq);
4516 goto fail;
4517 } else if (!ha->flags.msi_enabled) {
4518 ql_dbg(ql_dbg_init, vha, 0x0125,
4519 "INTa mode: Enabled.\n");
4520 ha->flags.mr_intr_valid = 1;
4521 /* Set max_qpairs to 0, as neither MSI-X nor MSI is enabled */
4522 ha->max_qpairs = 0;
4523 }
4524
4525 clear_risc_ints:
4526 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4527 goto fail;
4528
4529 spin_lock_irq(&ha->hardware_lock);
4530 wrt_reg_word(&reg->isp.semaphore, 0);
4531 spin_unlock_irq(&ha->hardware_lock);
4532
4533 fail:
4534 return ret;
4535 }
4536
4537 void
4538 qla2x00_free_irqs(scsi_qla_host_t *vha)
4539 {
4540 struct qla_hw_data *ha = vha->hw;
4541 struct rsp_que *rsp;
4542 struct qla_msix_entry *qentry;
4543 int i;
4544
4545 /*
4546 * We need to check that ha->rsp_q_map is valid in case we are called
4547 * from a probe failure context.
4548 */
4549 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4550 goto free_irqs;
4551 rsp = ha->rsp_q_map[0];
4552
4553 if (ha->flags.msix_enabled) {
4554 for (i = 0; i < ha->msix_count; i++) {
4555 qentry = &ha->msix_entries[i];
4556 if (qentry->have_irq) {
4557 irq_set_affinity_notifier(qentry->vector, NULL);
4558 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4559 }
4560 }
4561 kfree(ha->msix_entries);
4562 ha->msix_entries = NULL;
4563 ha->flags.msix_enabled = 0;
4564 ql_dbg(ql_dbg_init, vha, 0x0042,
4565 "Disabled MSI-X.\n");
4566 } else {
4567 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4568 }
4569
4570 free_irqs:
4571 pci_free_irq_vectors(ha->pdev);
4572 }
4573
4574 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4575 struct qla_msix_entry *msix, int vector_type)
4576 {
4577 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4578 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4579 int ret;
4580
4581 scnprintf(msix->name, sizeof(msix->name),
4582 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4583 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4584 if (ret) {
4585 ql_log(ql_log_fatal, vha, 0x00e6,
4586 "MSI-X: Unable to register handler -- %x/%d.\n",
4587 msix->vector, ret);
4588 return ret;
4589 }
4590 msix->have_irq = 1;
4591 msix->handle = qpair;
4592 return ret;
4593 }
4594