/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 * @srb: Pointer to srb structure.
 **/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
				  " sense len 0\n", ha->host_no,
				  cmd->device->channel, cmd->device->id,
				  cmd->device->lun, __func__));
		ha->status_srb = NULL;
		return;
	}
	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		      "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		      cmd->device->channel, cmd->device->id,
		      cmd->device->lun, __func__,
		      sts_entry->senseData[2] & 0x0f,
		      sts_entry->senseData[7],
		      sts_entry->senseData[12],
		      sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}

/**
 * qla4xxx_status_cont_entry - Process a Status Continuation entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Extended sense data.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint16_t sense_len;

	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			      "to OS srb=%p srb->state:%d\n", ha->host_no,
			      __func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
			   "handle=0x%0x, srb=%p\n", __func__,
			   sts_entry->handle, srb);
		if (is_qla8022(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
			   " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
					     cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun0, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device. We should get a ddb state change
		 * AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * the underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			qla4xxx_copy_sense(ha, sts_entry, srb);
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done. In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun1, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
			      "state: 0x%x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device. We should get a ddb
		 * state change AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}

/**
 * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
					  struct passthru_status *sts_entry)
{
	struct iscsi_task *task;
	struct ddb_entry *ddb_entry;
	struct ql4_task_data *task_data;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	itt_t itt;
	uint32_t fw_ddb_index;

	itt = sts_entry->handle;
	fw_ddb_index = le32_to_cpu(sts_entry->target);

	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);

	if (ddb_entry == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
			   __func__, sts_entry->target);
		return;
	}

	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;
	spin_lock(&conn->session->lock);
	task = iscsi_itt_to_task(conn, itt);
	spin_unlock(&conn->session->lock);

	if (task == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
		return;
	}

	task_data = task->dd_data;
	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
	ha->req_q_count += task_data->iocb_req_cnt;
	ha->iocb_cnt -= task_data->iocb_req_cnt;
	queue_work(ha->task_wq, &task_data->task_work);
}
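
/**
 * qla4xxx_del_mrb_from_active_array - find and remove an active mrb
 * @ha: Pointer to host adapter structure.
 * @index: mrb handle from the mbox status IOCB.
 *
 * Validates the handle, clears the mrb's slot in the active array, and
 * returns its IOCBs to the request queue counters. Returns the mrb on
 * success, or NULL for an invalid or stale handle.
 **/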
static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
						     uint32_t index)
{
	struct mrb *mrb = NULL;

	/* validate handle and remove from active array */
	if (index >= MAX_MRB)
		return mrb;

	mrb = ha->active_mrb_array[index];
	ha->active_mrb_array[index] = NULL;
	if (!mrb)
		return mrb;

	/* update counters */
	ha->req_q_count += mrb->iocb_cnt;
	ha->iocb_cnt -= mrb->iocb_cnt;

	return mrb;
}
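
/**
 * qla4xxx_mbox_status_entry - processes mailbox status IOCBs
 * @ha: Pointer to host adapter structure.
 * @mbox_sts_entry: Pointer to mbox status IOCB.
 *
 * Completes and frees the mrb that issued the mailbox IOCB. Only
 * MBOX_CMD_PING is handled here; its result is posted as a ping event.
 **/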
static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
				      struct mbox_status_iocb *mbox_sts_entry)
{
	struct mrb *mrb;
	uint32_t status;
	uint32_t data_size;

	mrb = qla4xxx_del_mrb_from_active_array(ha,
					le32_to_cpu(mbox_sts_entry->handle));

	if (mrb == NULL) {
		ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
			   mbox_sts_entry->handle);
		return;
	}

	switch (mrb->mbox_cmd) {
	case MBOX_CMD_PING:
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
				  "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
				  __func__, mrb->mbox_cmd,
				  mbox_sts_entry->out_mbox[0],
				  mbox_sts_entry->out_mbox[6]));

		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
			status = ISCSI_PING_SUCCESS;
		else
			status = mbox_sts_entry->out_mbox[6];

		data_size = sizeof(mbox_sts_entry->out_mbox);

		qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
					   (uint8_t *) mbox_sts_entry->out_mbox);
		break;

	default:
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
				  "0x%x\n", __func__, mrb->mbox_cmd));
	}

	kfree(mrb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry.
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
				qla4xxx_passthru_status_entry(ha,
					(struct passthru_status *)sts_entry);
			else
				ql4_printk(KERN_ERR, ha,
					   "%s: Invalid status received\n",
					   __func__);

			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
					le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));
			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		case ET_MBOX_STATUS:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: mbox status IOCB\n", __func__));
			qla4xxx_mbox_status_entry(ha,
					(struct mbox_status_iocb *)sts_entry);
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = is_qla8022(ha)
				    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
				    : readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = is_qla8022(ha)
			    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
			    : readl(&ha->reg->mailbox[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					mbox_sts[i];
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			qla4xxx_dump_registers(ha);

			if (ql4xdontresethba) {
				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
					      ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			if (is_qla8022(ha))
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED: /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
		case MBOX_ASTS_DUPLICATE_IP:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			       "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			       mbox_sts[2], mbox_sts[3]);

			/* mbox_sts[2] = Old ACB state
			 * mbox_sts[3] = new ACB state */
			if ((mbox_sts[3] == ACB_STATE_VALID) &&
			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
			     (mbox_sts[2] == ACB_STATE_ACQUIRING)))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
				 (mbox_sts[2] == ACB_STATE_VALID)) {
				if (is_qla8022(ha))
					set_bit(DPC_RESET_HA_FW_CONTEXT,
						&ha->dpc_flags);
				else
					set_bit(DPC_RESET_HA, &ha->dpc_flags);
			} else if (mbox_sts[3] == ACB_STATE_UNCONFIGURED)
				complete(&ha->disable_acb_comp);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
					      "mb1:0x%x mb2:0x%x mb3:0x%x "
					      "mb4:0x%x mb5:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_sts[0], mbox_sts[1],
					      mbox_sts[2], mbox_sts[3],
					      mbox_sts[4], mbox_sts[5]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed! AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				/* dump the queued entries, not the current
				 * mailbox registers */
				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		case MBOX_ASTS_TXSCVR_INSERTED:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x Transceiver"
				      " inserted\n", ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_TXSCVR_REMOVED:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x Transceiver"
				      " removed\n", ha->host_no, mbox_sts[0]));
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4_8xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status of the adapter.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. Runs in interrupt context.
 **/
void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
					 uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & HSRX_RISC_IOCB_INT)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
				readl(&ha->qla4_8xxx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_8xxx_reg->host_int);
	readl(&ha->qla4_8xxx_reg->host_int);
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status of the adapter.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. Runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4_8xxx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: Number of requests already serviced for this interrupt;
 *	the interrupt only counts as spurious when nothing was serviced.
 **/
static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
					 uint8_t reqs_count)
{
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		writel(0, &ha->qla4_8xxx_reg->host_int);
		if (test_bit(AF_INTx_ENABLED, &ha->flags))
			qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
					0xfbff);
	}
	ha->spurious_int_count++;
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4_8xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
	status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;

	status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		      ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}
		intr_status = readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		     (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
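
/**
 * qla4_8xxx_msi_handler - MSI interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Clears and flushes the legacy interrupt status, then defers the
 * actual servicing to qla4_8xxx_default_intr_handler().
 **/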
irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}

/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		      ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		intr_status = readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		     (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	ha->isr_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
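
/**
 * qla4_8xxx_msix_rsp_q - MSI-X response queue interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Drains the response queue and then clears the host interrupt register.
 **/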
irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_process_response_queue(ha);
	writel(0, &ha->qla4_8xxx_reg->host_int);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->isr_count++;
	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 * PROCESS_ALL_AENS	    0
 * FLUSH_DDB_CHANGED_AENS   1
 * RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			switch (process_aen) {
			case FLUSH_DDB_CHANGED_AENS:
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			case PROCESS_ALL_AENS:
			default:
				/* Specific device. */
				if (mbox_sts[1] == 1)
					qla4xxx_process_ddb_changed(ha,
						mbox_sts[2], mbox_sts[3],
						mbox_sts[4]);
				break;
			}
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
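
/**
 * qla4xxx_request_irqs - attaches an interrupt handler for the adapter
 * @ha: Pointer to host adapter structure.
 *
 * For ISP82xx, tries MSI-X first (unless overridden via ql4xenablemsix),
 * then falls back to MSI and finally to INTx; all other adapters use
 * INTx directly. Returns 0 when an irq is attached.
 **/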
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret;

	if (!is_qla8022(ha))
		goto try_intx;

	if (ql4xenablemsix == 2)
		goto try_msi;

	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	}

	ql4_printk(KERN_WARNING, ha,
		   "MSI-X: Falling back to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
				  0, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			set_bit(AF_MSI_ENABLED, &ha->flags);
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
				   "MSI: Failed to reserve interrupt %d; "
				   "already in use.\n", ha->pdev->irq);
			pci_disable_msi(ha->pdev);
		}
	}
	ql4_printk(KERN_WARNING, ha,
		   "MSI: Falling back to INTx mode -- %d.\n", ret);

try_intx:
	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
			  IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		set_bit(AF_INTx_ENABLED, &ha->flags);
		goto irq_attached;

	} else {
		ql4_printk(KERN_WARNING, ha,
			   "INTx: Failed to reserve interrupt %d; already in"
			   " use.\n", ha->pdev->irq);
		return ret;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
		   __func__, ha->pdev->irq);
	return ret;
}
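
/**
 * qla4xxx_free_irqs - releases the adapter's interrupt resources
 * @ha: Pointer to host adapter structure.
 *
 * Undoes qla4xxx_request_irqs() for whichever of MSI-X, MSI, or INTx
 * was attached.
 **/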
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (test_bit(AF_MSIX_ENABLED, &ha->flags))
		qla4_8xxx_disable_msix(ha);
	else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
		free_irq(ha->pdev->irq, ha);
		pci_disable_msi(ha->pdev);
	} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
		free_irq(ha->pdev->irq, ha);
}