// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2014 Cisco Systems, Inc.  All rights reserved.

#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "snic_io.h"
#include "snic.h"

#define snic_cmd_tag(sc)	(scsi_cmd_to_rq(sc)->tag)
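
/*
 * snic_cmd_tag() maps a scsi_cmnd to its block-layer request tag; the
 * same tag is used as the command id exchanged with firmware and as the
 * input for picking a per-tag io_lock (see snic_io_lock_hash() below).
 */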

const char *snic_state_str[] = {
	[SNIC_INIT] = "SNIC_INIT",
	[SNIC_ERROR] = "SNIC_ERROR",
	[SNIC_ONLINE] = "SNIC_ONLINE",
	[SNIC_OFFLINE] = "SNIC_OFFLINE",
	[SNIC_FWRESET] = "SNIC_FWRESET",
};

static const char * const snic_req_state_str[] = {
	[SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED",
	[SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING",
	[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
	[SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING",
	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
	[SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPLETE",
};

/* snic cmd status strings */
static const char * const snic_io_status_str[] = {
	[SNIC_STAT_IO_SUCCESS] = "SNIC_STAT_IO_SUCCESS", /* 0x0 */
	[SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
	[SNIC_STAT_OUT_OF_RES] = "SNIC_STAT_OUT_OF_RES",
	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
	[SNIC_STAT_REQ_NOT_SUP] = "SNIC_STAT_REQ_NOT_SUP",
	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
	[SNIC_STAT_ABORTED] = "SNIC_STAT_ABORTED",
	[SNIC_STAT_TIMEOUT] = "SNIC_STAT_TIMEOUT",
	[SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
	[SNIC_STAT_FW_ERR] = "SNIC_STAT_FW_ERR",
	[SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
	[SNIC_STAT_ITMF_FAIL] = "SNIC_STAT_ITMF_FAIL",
	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
	[SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
	[SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
	[SNIC_STAT_NO_BOOTLUN] = "SNIC_STAT_NO_BOOTLUN",
	[SNIC_STAT_SCSI_ERR] = "SNIC_STAT_SCSI_ERR",
	[SNIC_STAT_NOT_READY] = "SNIC_STAT_NOT_READY",
	[SNIC_STAT_FATAL_ERROR] = "SNIC_STAT_FATAL_ERROR",
};

static void snic_scsi_cleanup(struct snic *, int);

const char *
snic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
		return "Unknown";

	return snic_state_str[state];
}

static const char *
snic_io_status_to_str(unsigned int state)
{
	if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
	    (!snic_io_status_str[state]))
		return "Unknown";

	return snic_io_status_str[state];
}

static const char *
snic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_req_state_str) ||
	    !snic_req_state_str[state])
		return "Unknown";

	return snic_req_state_str[state];
}

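/*
 * Per-IO state is guarded by a small table of spinlocks indexed by a
 * hash of the command tag, spreading contention across SNIC_IO_LOCKS
 * locks; the mask below assumes SNIC_IO_LOCKS is a power of two.
 */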
static inline spinlock_t *
snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
{
	u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);

	return &snic->io_req_lock[hash];
}

static inline spinlock_t *
snic_io_lock_tag(struct snic *snic, int tag)
{
	return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
}

/* snic_release_req_buf : Releases snic_req_info */
static void
snic_release_req_buf(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc)
{
	struct snic_host_req *req = rqi_to_req(rqi);

	/* Freeing cmd without marking completion, not okay */
	SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
		      (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));

	SNIC_SCSI_DBG(snic->shost,
		      "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
		      rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
		      CMD_FLAGS(sc));

	if (req->u.icmnd.sense_addr)
		dma_unmap_single(&snic->pdev->dev,
				 le64_to_cpu(req->u.icmnd.sense_addr),
				 SCSI_SENSE_BUFFERSIZE,
				 DMA_FROM_DEVICE);

	scsi_dma_unmap(sc);

	snic_req_free(snic, rqi);
} /* end of snic_release_req_buf */

/*
 * snic_queue_icmnd_req : Queues snic_icmnd request
 */
static int
snic_queue_icmnd_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int sg_cnt)
{
	struct scatterlist *sg;
	struct snic_sg_desc *sgd;
	dma_addr_t pa = 0;
	struct scsi_lun lun;
	u16 flags = 0;
	int ret = 0;
	unsigned int i;

	if (sg_cnt) {
		flags = SNIC_ICMND_ESGL;
		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);

		for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
			sgd->addr = cpu_to_le64(sg_dma_address(sg));
			sgd->len = cpu_to_le32(sg_dma_len(sg));
			sgd->_resvd = 0;
			sgd++;
		}
	}

	pa = dma_map_single(&snic->pdev->dev,
			    sc->sense_buffer,
			    SCSI_SENSE_BUFFERSIZE,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
			      sc->sense_buffer, snic_cmd_tag(sc));
		ret = -ENOMEM;

		return ret;
	}

	int_to_scsilun(sc->device->lun, &lun);
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags |= SNIC_ICMND_RD;
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags |= SNIC_ICMND_WR;

	/* Initialize icmnd */
	snic_icmnd_init(rqi->req,
			snic_cmd_tag(sc),
			snic->config.hid, /* hid */
			(ulong) rqi,
			flags, /* command flags */
			rqi->tgt_id,
			lun.scsi_lun,
			sc->cmnd,
			sc->cmd_len,
			scsi_bufflen(sc),
			sg_cnt,
			(ulong) req_to_sgl(rqi->req),
			pa, /* sense buffer pa */
			SCSI_SENSE_BUFFERSIZE);

	atomic64_inc(&snic->s_stats.io.active);
	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		atomic64_dec(&snic->s_stats.io.active);
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
			      ret);
	} else
		snic_stats_update_active_ios(&snic->s_stats);

	return ret;
} /* end of snic_queue_icmnd_req */

/*
 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
 */
static int
snic_issue_scsi_req(struct snic *snic,
		    struct snic_tgt *tgt,
		    struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	int sg_cnt = 0;
	int ret = 0;
	u32 tag = snic_cmd_tag(sc);
	u64 cmd_trc = 0, cmd_st_flags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;

	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
	sg_cnt = scsi_dma_map(sc);
	if (sg_cnt < 0) {
		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));

		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi = snic_req_init(snic, sg_cnt);
	if (!rqi) {
		scsi_dma_unmap(sc);
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi->tgt_id = tgt->id;
	rqi->sc = sc;

	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	cmd_trc = SNIC_TRC_CMD(sc);
	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
	io_lock = snic_io_lock_hash(snic, sc);

	/* create wq desc and enqueue it */
	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
			      sc, ret);

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
		spin_unlock_irqrestore(io_lock, flags);

		if (rqi)
			snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
			 SNIC_TRC_CMD_STATE_FLAGS(sc));
	} else {
		u32 io_sz = scsi_bufflen(sc) >> 9;
		u32 qtime = jiffies - rqi->start_time;
		struct snic_io_stats *iostats = &snic->s_stats.io;

		if (io_sz > atomic64_read(&iostats->max_io_sz))
			atomic64_set(&iostats->max_io_sz, io_sz);

		if (qtime > atomic64_read(&iostats->max_qtime))
			atomic64_set(&iostats->max_qtime, qtime);

		SNIC_SCSI_DBG(snic->shost,
			      "issue_sc:sc %p, tag %d queued to WQ.\n",
			      sc, tag);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
			 sg_cnt, cmd_trc, cmd_st_flags);
	}

issue_sc_end:

	return ret;
} /* end of snic_issue_scsi_req */


/*
 * snic_queuecommand
 * Routine to send a scsi cdb to LLD
 * Called with host_lock held and interrupts disabled
 */
int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic_tgt *tgt = NULL;
	struct snic *snic = shost_priv(shost);
	int ret;

	tgt = starget_to_tgt(scsi_target(sc->device));
	ret = snic_tgt_chkready(tgt);
	if (ret) {
		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
		sc->result = ret;
		scsi_done(sc);

		return 0;
	}

	if (snic_get_state(snic) != SNIC_ONLINE) {
		SNIC_HOST_ERR(shost, "snic state is %s\n",
			      snic_state_str[snic_get_state(snic)]);

		return SCSI_MLQUEUE_HOST_BUSY;
	}
	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);

	ret = snic_issue_scsi_req(snic, tgt, sc);
	if (ret) {
		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
} /* end of snic_queuecommand */

/*
 * snic_proc_tmreq_pending_state:
 * caller should hold IO lock
 */
static void
snic_proc_tmreq_pending_state(struct snic *snic,
			      struct scsi_cmnd *sc,
			      u8 cmpl_status)
{
	int state = CMD_STATE(sc);

	if (state == SNIC_IOREQ_ABTS_PENDING)
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
	else if (state == SNIC_IOREQ_LR_PENDING)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
	else
		SNIC_BUG_ON(1);

	switch (cmpl_status) {
	case SNIC_STAT_IO_SUCCESS:
		CMD_FLAGS(sc) |= SNIC_IO_DONE;
		break;

	case SNIC_STAT_ABORTED:
		CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
		break;

	default:
		SNIC_BUG_ON(1);
	}
}

/*
 * snic_process_io_failed_state:
 * Processes IO's error states
 */
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;

	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;

	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;

	case SNIC_STAT_DATA_CNT_MISMATCH: /* Recv/Sent more/less data than exp */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;

	case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;

	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SGL_INVALID:	/* Req was aborted due to sgl error */
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;

	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		break;

	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		res = DID_NO_CONNECT;
		break;

	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost,
			      "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline. or Unknown\n");
		res = DID_ERROR;
		break;
	}

	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));

	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
} /* end of snic_process_io_failed_state */

/*
 * snic_tmreq_pending : is task management in progress.
 */
static int
snic_tmreq_pending(struct scsi_cmnd *sc)
{
	int state = CMD_STATE(sc);

	return ((state == SNIC_IOREQ_ABTS_PENDING) ||
		(state == SNIC_IOREQ_LR_PENDING));
}

/*
 * snic_process_icmnd_cmpl_status:
 * Caller should hold io_lock
 */
static int
snic_process_icmnd_cmpl_status(struct snic *snic,
			       struct snic_icmnd_cmpl *icmnd_cmpl,
			       u8 cmpl_stat,
			       struct scsi_cmnd *sc)
{
	u8 scsi_stat = icmnd_cmpl->scsi_status;
	u64 xfer_len = 0;
	int ret = 0;

	/* Mark the IO as complete */
	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;

	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
		sc->result = (DID_OK << 16) | scsi_stat;

		xfer_len = scsi_bufflen(sc);

		/* Update SCSI Cmd with resid value */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));

		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
			atomic64_inc(&snic->s_stats.misc.io_under_run);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&snic->s_stats.misc.qfull);

		ret = 0;
	} else {
		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
		atomic64_inc(&snic->s_stats.io.fail);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
		ret = 1;
	}

	return ret;
} /* end of snic_process_icmnd_cmpl_status */


/*
 * snic_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void
snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct scsi_cmnd *sc = NULL;
	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
	struct snic_host_req *req = NULL;
	struct snic_req_info *rqi = NULL;
	unsigned long flags, start_time;
	spinlock_t *io_lock;
	u8 sc_stat = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
	sc_stat = icmnd_cmpl->scsi_status;

	SNIC_SCSI_DBG(snic->shost,
549 "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,i ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
	WARN_ON_ONCE(!sc);

	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id,
			      fwreq);

		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
			 ((u64)hdr_stat << 16 |
			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);

		return;
	}

	io_lock = snic_io_lock_hash(snic, sc);

	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	SNIC_SCSI_DBG(snic->shost,
583 "Icmnd_cmpl:lun %lld sc %p cmd %xtag %d flags 0x%llx rqi %p\n",
		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
		      CMD_FLAGS(sc), rqi);

	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return;
	}

	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
	WARN_ON_ONCE(req);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id, sc, CMD_FLAGS(sc));
		return;
	}

	rqi = (struct snic_req_info *) ctx;
	start_time = rqi->start_time;

	/* firmware completed the io */
	rqi->io_cmpl = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (unlikely(snic_tmreq_pending(sc))) {
		snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
		spin_unlock_irqrestore(io_lock, flags);

		snic_stats_update_io_cmpl(&snic->s_stats);

		/* Expected value is SNIC_STAT_ABORTED */
		if (likely(hdr_stat == SNIC_STAT_ABORTED))
			return;

		SNIC_SCSI_DBG(snic->shost,
			      "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      snic_io_status_to_str(hdr_stat),
			      sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
			      CMD_FLAGS(sc));

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
			 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

		return;
	}

	if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
		scsi_print_command(sc);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
			      sc, cmnd_id, sc->cmnd[0],
			      snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
	}

	/* Break link with the SCSI Command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= SNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	/* For now, consider only successful IO. */
	snic_calc_io_process_time(snic, rqi);

	snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));


	scsi_done(sc);

	snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */

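/*
 * snic_proc_dr_cmpl_locked : Processes a device-reset completion.
 * Records the LR status; if an abort is pending on the reset or the
 * reset has already timed out, only notes that in the cmd flags,
 * otherwise marks the reset complete and wakes up the waiter through
 * rqi->dr_done. Caller should hold the io_lock.
 */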
static void
snic_proc_dr_cmpl_locked(struct snic *snic,
			 struct snic_fw_req *fwreq,
			 u8 cmpl_stat,
			 u32 cmnd_id,
			 struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
	u32 start_time = rqi->start_time;

	CMD_LR_STATUS(sc) = cmpl_stat;

	SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}


	if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}

	CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
	CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

	SNIC_SCSI_DBG(snic->shost,
		      "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
		      (int)(cmnd_id & SNIC_TAG_MASK),
		      snic_io_status_to_str(cmpl_stat),
		      CMD_FLAGS(sc));

	if (rqi->dr_done)
		complete(rqi->dr_done);
} /* end of snic_proc_dr_cmpl_locked */

/*
 * snic_update_abort_stats : Updates abort stats based on completion status.
 */
static void
snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
{
	struct snic_abort_stats *abt_stats = &snic->s_stats.abts;

	SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");

	switch (cmpl_stat) {
	case SNIC_STAT_IO_SUCCESS:
		break;

	case SNIC_STAT_TIMEOUT:
		atomic64_inc(&abt_stats->fw_tmo);
		break;

	case SNIC_STAT_IO_NOT_FOUND:
		atomic64_inc(&abt_stats->io_not_found);
		break;

	default:
		atomic64_inc(&abt_stats->fail);
		break;
	}
}

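/*
 * snic_process_itmf_cmpl : Processes an ITMF completion from firmware.
 * The TM flags ride in the bits of cmnd_id above SNIC_TAG_MASK and
 * select between abort, device-reset, and abort-of-device-reset
 * handling. Takes and releases the per-tag io_lock.
 */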
static int
snic_process_itmf_cmpl(struct snic *snic,
		       struct snic_fw_req *fwreq,
		       u32 cmnd_id,
		       u8 cmpl_stat,
		       struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	u32 tm_tags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return ret;
	}
	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		return ret;
	}

	/* Extract task management flags */
	tm_tags = cmnd_id & ~(SNIC_TAG_MASK);

	start_time = rqi->start_time;
	cmnd_id &= (SNIC_TAG_MASK);

	switch (tm_tags) {
	case SNIC_TAG_ABORT:
		/* Abort only issued on cmd */
		snic_update_abort_stats(snic, cmpl_stat);

		if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it. */
			ret = -1;
			spin_unlock_irqrestore(io_lock, flags);
			break;
		}

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
			      cmnd_id,
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		/*
		 * If scsi_eh thread is blocked waiting for abts complete,
		 * signal completion to it. IO will be cleaned in the thread,
		 * else clean it in this context.
		 */
		if (rqi->abts_done) {
			complete(rqi->abts_done);
			spin_unlock_irqrestore(io_lock, flags);

			break; /* jump out */
		}

		CMD_SP(sc) = NULL;
		sc->result = (DID_ERROR << 16);
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
			      sc, CMD_FLAGS(sc));

		spin_unlock_irqrestore(io_lock, flags);

		snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, SNIC_TRC_CMD(sc),
			 SNIC_TRC_CMD_STATE_FLAGS(sc));

		scsi_done(sc);

		break;

	case SNIC_TAG_DEV_RST:
	case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
		snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
		spin_unlock_irqrestore(io_lock, flags);
		ret = 0;

		break;

	case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
		/* Abort and terminate completion of device reset req */

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
			      cmnd_id, snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		if (rqi->abts_done)
			complete(rqi->abts_done);

		spin_unlock_irqrestore(io_lock, flags);

		break;

	default:
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);

		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      cmnd_id,
			      CMD_FLAGS(sc));
		ret = -1;
		SNIC_BUG_ON(1);

		break;
	}

	return ret;
} /* end of snic_process_itmf_cmpl */

/*
 * snic_itmf_cmpl_handler.
 * Routine to handle itmf completions.
 */
static void
snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	struct snic_itmf_cmpl *itmf_cmpl = NULL;
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      __func__, typ, hdr_stat, cmnd_id, hid, ctx);

	itmf_cmpl = &fwreq->u.itmf_cmpl;
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: nterm %u, flags 0x%x\n",
		      le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);

	/* spl case, dev reset issued through ioctl */
	if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
		rqi = (struct snic_req_info *) ctx;
		sc = rqi->sc;

		goto ioctl_dev_rst;
	}

	if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);

ioctl_dev_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);

		return;
	}

	snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
} /* end of snic_itmf_cmpl_handler */


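/*
 * snic_hba_reset_scsi_cleanup : Cleans up outstanding SCSI cmds after an
 * HBA reset and folds the active IO and firmware-request counters back
 * into the stats, since those IOs will never complete normally.
 */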
static void
snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_stats *st = &snic->s_stats;
	long act_ios = 0, act_fwreqs = 0;

	SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");
	snic_scsi_cleanup(snic, snic_cmd_tag(sc));

	/* Update stats on pending IOs */
	act_ios = atomic64_read(&st->io.active);
	atomic64_add(act_ios, &st->io.compl);
	atomic64_sub(act_ios, &st->io.active);

	act_fwreqs = atomic64_read(&st->fw.actv_reqs);
	atomic64_sub(act_fwreqs, &st->fw.actv_reqs);
}

/*
 * snic_hba_reset_cmpl_handler :
 *
 * Notes :
 * 1. Cleanup all the scsi cmds, release all snic specific cmds
 * 2. Issue Report Targets in case of SAN targets
 */
static int
snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags, gflags;
	int ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n",
		       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));

	SNIC_SCSI_DBG(snic->shost,
		      "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	/* spl case, host reset issued through ioctl */
	if (cmnd_id == SCSI_NO_TAG) {
		rqi = (struct snic_req_info *) ctx;
		SNIC_HOST_INFO(snic->shost,
			       "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n",
			       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
		sc = rqi->sc;

		goto ioctl_hba_rst;
	}

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return 1;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
ioctl_hba_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);
		ret = 1;

		return ret;
	}

	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n",
		       sc, rqi, cmnd_id, CMD_FLAGS(sc));

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);

	if (!snic->remove_wait) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl:host reset completed after timeout\n");
		ret = 1;

		return ret;
	}

	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		ret = 1;

		return ret;
	}
	/* stats */
	spin_unlock_irqrestore(io_lock, flags);

	/* scsi cleanup */
	snic_hba_reset_scsi_cleanup(snic, sc);

	SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
		    snic_get_state(snic) != SNIC_FWRESET);

	/* Careful locking between snic_lock and io lock */
	spin_lock_irqsave(io_lock, flags);
	spin_lock_irqsave(&snic->snic_lock, gflags);
	if (snic_get_state(snic) == SNIC_FWRESET)
		snic_set_state(snic, SNIC_ONLINE);
	spin_unlock_irqrestore(&snic->snic_lock, gflags);

	if (snic->remove_wait)
		complete(snic->remove_wait);

	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);

	ret = 0;
	/* Rediscovery is for SAN */
	if (snic->config.xpt_type == SNIC_DAS)
		return ret;

	SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
	queue_work(snic_glob->event_q, &snic->disc_work);

	return ret;
}

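/* Handling of firmware MSG ACKs is not implemented yet. */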
static void
snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");

	SNIC_ASSERT_NOT_IMPL(1);
}

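/*
 * snic_aen_handler : Decodes an async event notification posted by
 * firmware and logs the event; acting on these events is not
 * implemented yet.
 */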
static void
snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct snic_async_evnotify *aen = &fwreq->u.async_ev;
	u32 event_id = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	event_id = le32_to_cpu(aen->ev_id);

	switch (event_id) {
	case SNIC_EV_TGT_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
		break;

	case SNIC_EV_TGT_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
		break;

	case SNIC_EV_LUN_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
		break;

	case SNIC_EV_LUN_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
		break;

	case SNIC_EV_CONF_CHG:
		SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
		break;

	case SNIC_EV_TGT_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
		break;

	case SNIC_EV_TGT_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
		break;

	case SNIC_EV_LUN_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
		break;

	case SNIC_EV_LUN_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
		break;

	case SNIC_EV_DISC_CMPL:
		SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_ASSERT_NOT_IMPL(1);
} /* end of snic_aen_handler */

/*
 * snic_io_cmpl_handler
 * Routine to process CQ entries(IO Completions) posted by fw.
 */
static int
snic_io_cmpl_handler(struct vnic_dev *vdev,
		     unsigned int cq_idx,
		     struct snic_fw_req *fwreq)
{
	struct snic *snic = svnic_dev_priv(vdev);
	u64 start = jiffies, cmpl_time;

	snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));

	/* Update FW Stats */
	if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
	    (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
		atomic64_dec(&snic->s_stats.fw.actv_reqs);

	SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
		    (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));

	/* Check for snic subsys errors */
	switch (fwreq->hdr.status) {
	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem is NOT Ready.\n");
		break;

	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem in Unrecoverable State.\n");
		break;
	}

	switch (fwreq->hdr.type) {
	case SNIC_RSP_EXCH_VER_CMPL:
		snic_io_exch_ver_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		snic_report_tgt_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ICMND_CMPL:
		snic_icmnd_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ITMF_CMPL:
		snic_itmf_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		snic_hba_reset_cmpl_handler(snic, fwreq);
		break;

	case SNIC_MSG_ACK:
		snic_msg_ack_handler(snic, fwreq);
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		snic_aen_handler(snic, fwreq);
		break;

	default:
		SNIC_BUG_ON(1);
		SNIC_SCSI_DBG(snic->shost,
			      "Unknown Firmware completion request type %d\n",
			      fwreq->hdr.type);
		break;
	}

	/* Update Stats */
	cmpl_time = jiffies - start;
	if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
		atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);

	return 0;
} /* end of snic_io_cmpl_handler */

/*
 * snic_fwcq_cmpl_handler
 * Routine to process fwCQ
 * This CQ is independent, and not associated with wq/rq/wq_copy queues
 */
int
snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
{
	unsigned int num_ent = 0; /* number cq entries processed */
	unsigned int cq_idx;
	unsigned int nent_per_cq;
	struct snic_misc_stats *misc_stats = &snic->s_stats.misc;

	for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
		nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
						 snic_io_cmpl_handler,
						 io_cmpl_work);
		num_ent += nent_per_cq;

		if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
			atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
	}

	return num_ent;
} /* end of snic_fwcq_cmpl_handler */

/*
 * snic_queue_itmf_req: Common API to queue Task Management requests.
 * Use rqi->tm_tag for passing special tags.
 * @req_id : aborted request's tag, -1 for lun reset.
 */
static int
snic_queue_itmf_req(struct snic *snic,
		    struct snic_host_req *tmreq,
		    struct scsi_cmnd *sc,
		    u32 tmf,
		    u32 req_id)
{
	struct snic_req_info *rqi = req_to_rqi(tmreq);
	struct scsi_lun lun;
	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
	int ret = 0;

	SNIC_BUG_ON(!rqi);
	SNIC_BUG_ON(!rqi->tm_tag);

	/* fill in lun info */
	int_to_scsilun(sc->device->lun, &lun);

	/* Initialize snic_host_req: itmf */
	snic_itmf_init(tmreq,
		       tm_tag,
		       snic->config.hid,
		       (ulong) rqi,
		       0 /* flags */,
		       req_id, /* Command to be aborted. */
		       rqi->tgt_id,
		       lun.scsi_lun,
		       tmf);

	/*
	 * In case of multiple aborts on the same cmd,
	 * use try_wait_for_completion() and completion_done() to check
	 * whether aborts are still being queued even after completion
	 * of an abort issued earlier, e.g.
	 * SNIC_BUG_ON(completion_done(&rqi->done));
	 */

	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
	else
		SNIC_SCSI_DBG(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));

	return ret;
} /* end of snic_queue_itmf_req */

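/*
 * snic_issue_tm_req : Builds a TM request (LUN reset or abort) for the
 * given command and queues it to firmware. Returns -EBUSY while a
 * firmware reset is in progress, -ENOMEM if the TM request cannot be
 * allocated.
 */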
static int
snic_issue_tm_req(struct snic *snic,
		  struct snic_req_info *rqi,
		  struct scsi_cmnd *sc,
		  int tmf)
{
	struct snic_host_req *tmreq = NULL;
	int req_id = 0, tag = snic_cmd_tag(sc);
	int ret = 0;

	if (snic_get_state(snic) == SNIC_FWRESET)
		return -EBUSY;

	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(snic->shost,
		      "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
		      tmf, rqi, tag);


	if (tmf == SNIC_ITMF_LUN_RESET) {
		tmreq = snic_dr_req_init(snic, rqi);
		req_id = SCSI_NO_TAG;
	} else {
		tmreq = snic_abort_req_init(snic, rqi);
		req_id = tag;
	}

	if (!tmreq) {
		ret = -ENOMEM;

		goto tmreq_err;
	}

	ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);

tmreq_err:
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
			      tmf, sc, rqi, req_id, tag, ret);
	} else {
		SNIC_SCSI_DBG(snic->shost,
			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
			      tmf, sc, rqi, req_id, tag);
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
}

/*
 * snic_queue_abort_req : Queues abort req to WQ
 */
static int
snic_queue_abort_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int tmf)
{
	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
		      sc, rqi, snic_cmd_tag(sc), tmf);

	/* Add special tag for abort */
	rqi->tm_tag |= SNIC_TAG_ABORT;

	return snic_issue_tm_req(snic, rqi, sc, tmf);
}

/*
 * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
 */
static int
snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;

		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
			      tag, sc, CMD_FLAGS(sc));
		ret = FAILED;

		goto abort_fail;
	}

	rqi->abts_done = NULL;

	ret = FAILED;

	/* Check the abort status. */
	switch (CMD_ABTS_STATUS(sc)) {
	case SNIC_INVALID_CODE:
		/* Firmware didn't complete abort req, timedout */
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
		atomic64_inc(&snic->s_stats.abts.drv_tmo);
		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
		/* do not release snic request in timedout case */
		rqi = NULL;

		goto abort_fail;

	case SNIC_STAT_IO_SUCCESS:
	case SNIC_STAT_IO_NOT_FOUND:
		ret = SUCCESS;
		/*
		 * If the abort path doesn't call scsi_done(),
		 * the # of IO timeouts reaching 2 will cause the LUN to
		 * be taken offline. Call scsi_done() to complete the IO.
		 */
		sc->result = (DID_ERROR << 16);
		scsi_done(sc);
		break;

	default:
		/* Firmware completed abort with error */
		ret = FAILED;
		rqi = NULL;
		break;
	}

	CMD_SP(sc) = NULL;
	SNIC_HOST_INFO(snic->shost,
		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
		       CMD_FLAGS(sc));

abort_fail:
	spin_unlock_irqrestore(io_lock, flags);
	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	return ret;
} /* end of snic_abort_finish */

/*
 * snic_send_abort_and_wait : Issues Abort, and Waits
 */
static int
snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	struct snic_tgt *tgt = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);

	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* stats */

	io_lock = snic_io_lock_hash(snic, sc);

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by fw_cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for the command
	 * has happened, the completion won't actually complete the
	 * command and it will be considered as an aborted command.
	 *
	 * The CMD_SP will not be cleared except while holding io_lock
	 */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));

		ret = SUCCESS;

		goto send_abts_end;
	}

	rqi->abts_done = &tm_done;
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);

		ret = 0;
		goto abts_pending;
	}
	SNIC_BUG_ON(!rqi->abts_done);

	/* Save Command State, should be restored on failed to Queue. */
	sv_state = CMD_STATE(sc);

	/*
	 * Command is still pending, need to abort it
	 * If the fw completes the command after this point,
	 * the completion won't be done till mid-layer, since abort
	 * has already started.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);

	spin_unlock_irqrestore(io_lock, flags);

	/* Now Queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		atomic64_inc(&snic->s_stats.abts.q_fail);
		SNIC_HOST_ERR(snic->shost,
			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore Command's previous state */
		CMD_STATE(sc) = sv_state;
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_abts_end;
	}

	spin_lock_irqsave(io_lock, flags);
	if (tmf == SNIC_ITMF_ABTS_TASK) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
		atomic64_inc(&snic->s_stats.abts.num);
	} else {
		/* term stats */
		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
	}
	spin_unlock_irqrestore(io_lock, flags);

	SNIC_SCSI_DBG(snic->shost,
		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
		      sc, tag, CMD_FLAGS(sc));


	ret = 0;

abts_pending:
	/*
	 * Queued an abort IO, wait for its completion.
	 * Once the fw completes the abort command, it will
	 * wakeup this thread.
	 */
	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

send_abts_end:
	return ret;
} /* end of snic_send_abort_and_wait */

/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a snic_ioreq in the driver.
 * The snic_ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int
snic_abort_cmd(struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(sc->device->host);
	int ret = SUCCESS, tag = snic_cmd_tag(sc);
	u32 start_time = jiffies;

	SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);

	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: tag %x Parent Devs are not rdy\n",
			      tag);
		ret = FAST_IO_FAIL;

		goto abort_end;
	}


	ret = snic_send_abort_and_wait(snic, sc);
	if (ret)
		goto abort_end;

	ret = snic_abort_finish(snic, sc);

abort_end:
	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), 0,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	SNIC_SCSI_DBG(snic->shost,
		      "abts: Abort Req Status = %s\n",
		      (ret == SUCCESS) ? "SUCCESS" :
		      ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));

	return ret;
}


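/*
 * snic_is_abts_pending : Walks the tag map and returns 1 if any IO,
 * restricted to lr_sc's device (and excluding lr_sc itself) when lr_sc
 * is set, is still waiting on an abort in firmware; returns 0 otherwise.
 */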
static int
snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	struct scsi_device *lr_sdev = NULL;
	spinlock_t *io_lock = NULL;
	u32 tag;
	unsigned long flags;

	if (lr_sc)
		lr_sdev = lr_sc->device;

	/* walk through the tag map, and check if IOs are still pending in fw */
	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);

		if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		/*
		 * Found IO that is still pending w/ firmware and belongs to
		 * the LUN that is under reset, if lr_sc != NULL
		 */
		SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);

			return 1;
		}

		spin_unlock_irqrestore(io_lock, flags);
	}

	return 0;
} /* end of snic_is_abts_pending */

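/*
 * snic_dr_clean_single_req : Terminates a single IO still pending on the
 * LUN under reset: marks it abort-pending, queues an abort/terminate to
 * firmware, waits for its completion, and finishes the command with
 * DID_ERROR. Returns 0 on success or for tags needing no cleanup,
 * nonzero if the abort could not be queued or is still pending.
 */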
static int
snic_dr_clean_single_req(struct snic *snic,
			 u32 tag,
			 struct scsi_device *lr_sdev)
{
	struct snic_req_info *rqi = NULL;
	struct snic_tgt *tgt = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	u32 sv_state = 0, tmf = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0;

	io_lock = snic_io_lock_tag(snic, tag);
	spin_lock_irqsave(io_lock, flags);
	sc = scsi_host_find_tag(snic->shost, tag);

	/* Ignore Cmds that don't belong to Lun Reset device */
	if (!sc || sc->device != lr_sdev)
		goto skip_clean;

	rqi = (struct snic_req_info *) CMD_SP(sc);

	if (!rqi)
		goto skip_clean;


	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_clean;


	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req: devrst is not pending sc 0x%p\n",
			      sc);

		goto skip_clean;
	}

	SNIC_SCSI_DBG(snic->shost,
		      "clean_single_req: Found IO in %s on lun\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	/* Save Command State */
	sv_state = CMD_STATE(sc);

	/*
	 * Any pending IO issued prior to reset is expected to be
	 * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING
	 * to indicate the IO is abort pending.
	 * When IO is completed, the IO will be handed over and handled
	 * in this function.
	 */

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	SNIC_BUG_ON(rqi->abts_done);

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		rqi->tm_tag = SNIC_TAG_DEV_RST;

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req:devrst sc 0x%p\n", sc);
	}

	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	rqi->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* Now queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;

		/* Restore Command State */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		ret = 1;
		goto skip_clean;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;

	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

	/* Recheck cmd state to check if it is now aborted. */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		goto skip_clean;
	}
	rqi->abts_done = NULL;

	/* if abort is still pending w/ fw, fail */
	if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
		ret = 1;

		goto skip_clean;
	}

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	snic_release_req_buf(snic, rqi, sc);

	sc->result = (DID_ERROR << 16);
	scsi_done(sc);

	ret = 0;

	return ret;

skip_clean:
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
} /* end of snic_dr_clean_single_req */

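/*
 * snic_dr_clean_pending_req : Cleans every pending IO on the LUN under
 * reset except the LUN-reset command itself, then rechecks that no
 * aborts are still outstanding with firmware.
 */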
1853 static int
snic_dr_clean_pending_req(struct snic * snic,struct scsi_cmnd * lr_sc)1854 snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
1855 {
1856 struct scsi_device *lr_sdev = lr_sc->device;
1857 u32 tag = 0;
1858 int ret = FAILED;
1859
1860 for (tag = 0; tag < snic->max_tag_id; tag++) {
1861 if (tag == snic_cmd_tag(lr_sc))
1862 continue;
1863
1864 ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
1865 if (ret) {
1866 SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);
1867
1868 goto clean_err;
1869 }
1870 }
1871
1872 schedule_timeout(msecs_to_jiffies(100));
1873
1874 /* Walk through all the cmds and check abts status. */
1875 if (snic_is_abts_pending(snic, lr_sc)) {
1876 ret = FAILED;
1877
1878 goto clean_err;
1879 }
1880
1881 ret = 0;
1882 SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");
1883
1884 return ret;
1885
1886 clean_err:
1887 ret = FAILED;
1888 SNIC_HOST_ERR(snic->shost,
1889 "Failed to Clean Pending IOs on %s device.\n",
1890 dev_name(&lr_sdev->sdev_gendev));
1891
1892 return ret;
1893
1894 } /* end of snic_dr_clean_pending_req */
1895
1896 /*
1897 * snic_dr_finish : Called by snic_device_reset
1898 */
static int
snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int lr_res = 0;
	int ret = FAILED;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_cmd_tag(sc), sc, CMD_FLAGS(sc));

		ret = FAILED;
		goto dr_fini_end;
	}

	rqi->dr_done = NULL;

	lr_res = CMD_LR_STATUS(sc);

	switch (lr_res) {
	case SNIC_INVALID_CODE:
		/* stats */
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset Timed out. flags 0x%llx\n",
			      snic_cmd_tag(sc), CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
		ret = FAILED;

		goto dr_failed;

	case SNIC_STAT_IO_SUCCESS:
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset cmpl\n",
			      snic_cmd_tag(sc));
		ret = 0;
		break;

	default:
		SNIC_HOST_ERR(snic->shost,
			      "dr_fini: Device Reset completed but failed. Tag = %x lr_status %s flags 0x%llx\n",
			      snic_cmd_tag(sc),
			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
		ret = FAILED;
		goto dr_failed;
	}
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Clean up any IOs on this LUN that have still not completed.
	 * Cleanup covers every command on this LUN except the LUN reset
	 * command itself. If all cmds get cleaned, the LUN reset succeeds;
	 * if any of them fail, the LUN reset fails.
	 */

	ret = snic_dr_clean_pending_req(snic, sc);
	if (ret) {
		spin_lock_irqsave(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
			      snic_cmd_tag(sc));
		rqi = (struct snic_req_info *) CMD_SP(sc);

		goto dr_failed;
	} else {
		/* Clean up the LUN reset command itself. */
		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			ret = SUCCESS; /* Completed Successfully */
		else
			ret = FAILED;
	}

dr_failed:
	lockdep_assert_held(io_lock);
	if (rqi)
		CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

dr_fini_end:
	return ret;
} /* end of snic_dr_finish */

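/*
 * snic_queue_dr_req : Marks the request with the device-reset TM tag and
 * queues a LUN RESET task management request to the firmware.
 */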
static int
snic_queue_dr_req(struct snic *snic,
		  struct snic_req_info *rqi,
		  struct scsi_cmnd *sc)
{
	/* Add special tag for device reset */
	rqi->tm_tag |= SNIC_TAG_DEV_RST;

	return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
}

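/*
 * snic_send_dr_and_wait : Issues the LUN reset TMF and waits up to
 * SNIC_LUN_RESET_TIMEOUT for the firmware to post its completion.
 */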
static int
snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int ret = FAILED, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));
		spin_unlock_irqrestore(io_lock, flags);

		ret = FAILED;
		goto send_dr_end;
	}

	/* Save command state to restore in case queuing fails. */
	sv_state = CMD_STATE(sc);

	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);

	rqi->dr_done = &tm_done;
	SNIC_BUG_ON(!rqi->dr_done);

	spin_unlock_irqrestore(io_lock, flags);
	/*
	 * The command state is now SNIC_IOREQ_LR_PENDING. If the original
	 * command completes in the meantime, icmnd_cmpl will mark it as
	 * completed; the LUN reset is still issued regardless.
	 */

	ret = snic_queue_dr_req(snic, rqi, sc);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore State */
		CMD_STATE(sc) = sv_state;
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->dr_done = NULL;
		/* rqi is freed in caller. */
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_dr_end;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	ret = 0;

	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);

send_dr_end:
	return ret;
}

/*
 * snic_dev_reset_supported : Auxiliary function to check whether the LUN
 * reset op is supported on this device. Returns 0 if not supported.
 */
static int
snic_dev_reset_supported(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	if (tgt->tdata.typ == SNIC_TGT_DAS)
		return 0;

	return 1;
}

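/*
 * snic_unlink_and_release_req : Detaches rqi from the scsi cmd, ORs the
 * given flag into the cmd flags, and releases the request buffers.
 */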
static void
snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = jiffies;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (rqi) {
		start_time = rqi->start_time;
		CMD_SP(sc) = NULL;
	}

	CMD_FLAGS(sc) |= flag;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
}

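/*
 * For orientation only: a minimal sketch of how the error handlers below
 * are wired into the SCSI midlayer. The authoritative template lives in
 * snic_main.c and may differ in members and ordering.
 *
 *	static struct scsi_host_template snic_host_template = {
 *		.eh_abort_handler	 = snic_abort_cmd,
 *		.eh_device_reset_handler = snic_device_reset,
 *		.eh_host_reset_handler	 = snic_host_reset,
 *	};
 */
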
/*
 * SCSI EH thread issues a LUN Reset when one or more commands on a LUN
 * fail to get aborted. It calls the driver's eh_device_reset with a SCSI
 * command on the LUN.
 */
int
snic_device_reset(struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = sc->device->host;
	struct snic *snic = shost_priv(shost);
	struct snic_req_info *rqi = NULL;
	int tag = snic_cmd_tag(sc);
	u32 start_time = jiffies;
	int ret = FAILED;
	int dr_supp = 0;

	SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
		      snic_cmd_tag(sc));
	dr_supp = snic_dev_reset_supported(sc->device);
	if (!dr_supp) {
		/* device reset op is not supported */
		SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
		snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);

		goto dev_rst_end;
	}

	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
		snic_unlink_and_release_req(snic, sc, 0);
		SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");

		goto dev_rst_end;
	}

	/* There is no tag when the LUN reset is issued through ioctl. */
	if (unlikely(tag <= SNIC_NO_TAG)) {
		SNIC_HOST_INFO(snic->shost,
			       "Devrst: LUN Reset Recvd thru IOCTL.\n");

		rqi = snic_req_init(snic, 0);
		if (!rqi)
			goto dev_rst_end;

		memset(scsi_cmd_priv(sc), 0,
		       sizeof(struct snic_internal_io_state));
		CMD_SP(sc) = (char *)rqi;
		CMD_FLAGS(sc) = SNIC_NO_FLAGS;

		/* Add special tag for dr coming from user space */
		rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
		rqi->sc = sc;
	}

	ret = snic_send_dr_and_wait(snic, sc);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
			      tag, ret);

		snic_unlink_and_release_req(snic, sc, 0);

		goto dev_rst_end;
	}

	ret = snic_dr_finish(snic, sc);

dev_rst_end:
	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time),
		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	SNIC_SCSI_DBG(snic->shost,
		      "Devrst: Returning from Device Reset : %s\n",
		      (ret == SUCCESS) ? "SUCCESS" : "FAILED");

	return ret;
} /* end of snic_device_reset */
/*
 * snic_issue_hba_reset : Builds and queues an HBA (firmware) reset request
 * and waits for the firmware to acknowledge it.
 */
static int
snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret = -ENOMEM;

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		ret = -ENOMEM;

		goto hba_rst_end;
	}

	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
		memset(scsi_cmd_priv(sc), 0,
		       sizeof(struct snic_internal_io_state));
		SNIC_HOST_INFO(snic->shost, "issu_hr: Host reset thru ioctl.\n");
		rqi->sc = sc;
	}

	req = rqi_to_req(rqi);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	SNIC_BUG_ON(CMD_SP(sc) != NULL);
	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
	snic->remove_wait = &wait;
	spin_unlock_irqrestore(io_lock, flags);

	/* Initialize Request */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
			snic->config.hid, 0, (ulong) rqi);

	req->u.reset.flags = 0;

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_hr: Queuing HBA Reset Failed. w err %d\n",
			      ret);

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_resets);
	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");

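	/*
	 * remove_wait is completed by the HBA reset completion handler
	 * once the firmware acknowledges the reset.
	 */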
	wait_for_completion_timeout(snic->remove_wait,
				    SNIC_HOST_RESET_TIMEOUT);

	if (snic_get_state(snic) == SNIC_FWRESET) {
		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timed out.\n");
		ret = -ETIMEDOUT;

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

	ret = 0;

	return ret;

hba_rst_err:
	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

hba_rst_end:
	SNIC_HOST_ERR(snic->shost,
		      "reset: HBA Reset Failed w/ err = %d.\n",
		      ret);

	return ret;
} /* end of snic_issue_hba_reset */

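/*
 * snic_reset : Moves the snic to SNIC_FWRESET, waits for in-flight IOs to
 * drain, then issues an HBA reset. The previous state is restored if the
 * reset fails.
 */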
int
snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(shost);
	enum snic_state sv_state;
	unsigned long flags;
	int ret = FAILED;

	/* Set snic state to SNIC_FWRESET */
	sv_state = snic_get_state(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic_get_state(snic) == SNIC_FWRESET) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		SNIC_HOST_INFO(shost, "reset: prev reset is in progress\n");

		msleep(SNIC_HOST_RESET_TIMEOUT);
		ret = SUCCESS;

		goto reset_end;
	}

	snic_set_state(snic, SNIC_FWRESET);
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Wait for all the IOs that are entered in Qcmd */
	while (atomic_read(&snic->ios_inflight))
		msleep(1);

	ret = snic_issue_hba_reset(snic, sc);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "reset: Host Reset Failed w/ err %d.\n",
			      ret);
		spin_lock_irqsave(&snic->snic_lock, flags);
		snic_set_state(snic, sv_state);
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
		ret = FAILED;

		goto reset_end;
	}

	ret = SUCCESS;

reset_end:
	return ret;
} /* end of snic_reset */

/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * the host is offlined by SCSI.
 */
int
snic_host_reset(struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = sc->device->host;
	u32 start_time = jiffies;
	int ret;

	SNIC_SCSI_DBG(shost,
		      "host reset: sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
		      snic_cmd_tag(sc), CMD_FLAGS(sc));

	ret = snic_reset(shost, sc);

	SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time),
		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	return ret;
} /* end of snic_host_reset */

/*
 * snic_cmpl_pending_tmreq : Completes a pending TM request.
 * Caller should hold io_lock.
 */
static void
snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;

	SNIC_SCSI_DBG(snic->shost,
		      "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
		      sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));

	/*
	 * CASE: FW didn't post the itmf completion due to PCIe errors.
	 * Mark the abort status as success so that snic_abort_finish()
	 * calls scsi completion.
	 */
	CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;

	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		return;

	if (rqi->dr_done)
		complete(rqi->dr_done);
	else if (rqi->abts_done)
		complete(rqi->abts_done);
}

/*
 * snic_scsi_cleanup : Walks through the tag map and releases the reqs,
 * completing each affected command with DID_TRANSPORT_DISRUPTED. The
 * command with tag ex_tag is skipped.
 */
static void
snic_scsi_cleanup(struct snic *snic, int ex_tag)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int tag;
	u64 st_time = 0;

	SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		/* Skip ex_tag */
		if (tag == ex_tag)
			continue;

		io_lock = snic_io_lock_tag(snic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		if (unlikely(snic_tmreq_pending(sc))) {
			/*
			 * FW completed the reset without posting
			 * completions for the outstanding IOs.
			 */
			snic_cmpl_pending_tmreq(snic, sc);
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			goto cleanup;
		}

		SNIC_SCSI_DBG(snic->shost,
			      "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
			      sc, rqi, tag, CMD_FLAGS(sc));

		CMD_SP(sc) = NULL;
		CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
		spin_unlock_irqrestore(io_lock, flags);
		st_time = rqi->start_time;

		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: Releasing rqi %p : flags 0x%llx\n",
			       rqi, CMD_FLAGS(sc));

		snic_release_req_buf(snic, rqi, sc);

cleanup:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
			       sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
			       jiffies_to_msecs(jiffies - st_time));

		/* Update IO stats */
		snic_stats_update_io_cmpl(&snic->s_stats);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
			 jiffies_to_msecs(jiffies - st_time), 0,
			 SNIC_TRC_CMD(sc),
			 SNIC_TRC_CMD_STATE_FLAGS(sc));

		scsi_done(sc);
	}
} /* end of snic_scsi_cleanup */

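/*
 * snic_shutdown_scsi_cleanup : Shutdown-time SCSI cleanup; releases all
 * outstanding requests.
 */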
void
snic_shutdown_scsi_cleanup(struct snic *snic)
{
	SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");

	snic_scsi_cleanup(snic, SCSI_NO_TAG);
} /* end of snic_shutdown_scsi_cleanup */

/*
 * snic_internal_abort_io : Issues an internal abort/terminate for a single
 * command; called by snic_tgt_scsi_abort_io.
 */
static int
snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 sv_state = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		goto skip_internal_abts;

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_internal_abts;

	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "internal_abts: dev rst not pending sc 0x%p\n",
			      sc);

		goto skip_internal_abts;
	}

	if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
		SNIC_SCSI_DBG(snic->shost,
			      "internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));

		goto skip_internal_abts;
	}

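	/* Save the current state so it can be restored if queuing fails. */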
	sv_state = CMD_STATE(sc);
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		/* stats */
		rqi->tm_tag = SNIC_TAG_DEV_RST;
		SNIC_SCSI_DBG(snic->shost, "internal_abts: dev rst sc %p\n", sc);
	}

	SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
		      snic_cmd_tag(sc));
	SNIC_BUG_ON(rqi->abts_done);
	spin_unlock_irqrestore(io_lock, flags);

	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "internal_abts: Tag = %x , Failed w/ err = %d\n",
			      snic_cmd_tag(sc), ret);

		spin_lock_irqsave(io_lock, flags);

		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		goto skip_internal_abts;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
	else
		CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;

	ret = SUCCESS;

skip_internal_abts:
	lockdep_assert_held(io_lock);
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
} /* end of snic_internal_abort_io */

/*
 * snic_tgt_scsi_abort_io : Aborts all outstanding IOs on a target;
 * called by snic_tgt_del.
 */
int
snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
{
	struct snic *snic = NULL;
	struct scsi_cmnd *sc = NULL;
	struct snic_tgt *sc_tgt = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag, abt_cnt = 0, tmf = 0;

	if (!tgt)
		return -1;

	snic = shost_priv(snic_tgt_to_shost(tgt));
	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");

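	/*
	 * Pick the TMF by target type: direct-attached (DAS) targets get a
	 * plain task abort; others get abort plus terminate.
	 */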
	if (tgt->tdata.typ == SNIC_TGT_DAS)
		tmf = SNIC_ITMF_ABTS_TASK;
	else
		tmf = SNIC_ITMF_ABTS_TASK_TERM;

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		sc_tgt = starget_to_tgt(scsi_target(sc->device));
		if (sc_tgt != tgt) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}
		spin_unlock_irqrestore(io_lock, flags);

		ret = snic_internal_abort_io(snic, sc, tmf);
		if (ret < 0) {
			SNIC_HOST_ERR(snic->shost,
				      "tgt_abt_io: Tag %x, Failed w err = %d\n",
				      tag, ret);

			continue;
		}

		if (ret == SUCCESS)
			abt_cnt++;
	}

	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);

	return 0;
} /* end of snic_tgt_scsi_abort_io */