// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
42 "Determines when initiator mode will be enabled. Possible values: "
43 "\"exclusive\" - initiator mode will be enabled on load, "
44 "disabled on enabling target mode and then on disabling target mode "
45 "enabled back; "
46 "\"disabled\" - initiator mode will never be enabled; "
47 "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
48 "when ready "
49 "\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
54 "User to control IRQ placement via smp_affinity."
55 "Valid with qlini_mode=disabled."
56 "1(default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0	/* simple task attribute */
#define FCP_PTA_HEADQ       1	/* head of queue task attribute */
#define FCP_PTA_ORDERED     2	/* ordered task attribute */
#define FCP_PTA_ACA         4	/* auto. contingent allegiance */
#define FCP_PTA_MASK        7	/* mask for task attribute field */
#define FCP_PRI_SHIFT       3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time those
 * functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int, to keep the caller from forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
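
/*
 * Usage sketch (illustrative, not a contract): callers snapshot the tick
 * around session setup -- e.g. qlt_create_sess() below does
 * qlt_do_generation_tick(vha, &sess->generation) -- and qlt_fc_port_deleted()
 * later compares generations so that stale deletion requests are ignored.
 */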

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
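
/*
 * Note: these two helpers maintain num_pend_cmds and its high-water mark
 * stat_max_pend_cmds under q_full_lock; judging by the lock name, the same
 * accounting presumably backs the qfull handling (see qlt_alloc_qfull_cmd()
 * declared above).
 */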


static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}
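
/*
 * The parked ATIOs are drained by qlt_try_to_dequeue_unknown_atios() below
 * (run from the unknown_atio_work delayed work): each entry is either
 * requeued via qlt_24xx_atio_pkt() once its d_id resolves to a host,
 * terminated on abort or tgt_stop, or left queued for another retry.
 */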

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
	    "%s: qla_target(%d): type %x ox_id %04x\n",
	    __func__, vha->vp_idx, atio->u.raw.entry_type,
	    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
		    (struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
		    "qla_target(%d):%s: CRC2 Response pkt\n",
		    vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (entry->u.isp24.vp_index != 0xFF) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
519 "qla_target(%d): Response pkt "
520 "(ABTS_RECV_24XX) received, with unknown "
521 "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;

		if (sp->fcport->flags & FCF_FCSP_DEVICE) {
			ql_dbg(ql_dbg_edif, vha, 0x20ef,
			    "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
			    sp->fcport->port_name);
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_AUTH_PEND);
			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    sp->fcport->d_id.b24);
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
			    sp->fcport->d_id.b24, 0, sp->fcport);
		}
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		if (vha->hw->flags.edif_enabled &&
		    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
			fcport->flags |= FCF_FCSP_DEVICE;
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_nack_sp_done);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = NULL;

	if (!fcport || !fcport->vha || !fcport->vha->hw)
		return;

	ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-based ref-counting scheme, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
    struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;

	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
	    " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
	    sess, link, sess->port_name,
	    iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}
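
/*
 * Reference-count rules, as far as this file shows them: qlt_plogi_ack_t
 * ref_count is only touched under hardware_lock (see the comment above
 * qla24xx_post_nack_work()), so the bare increments/decrements here are
 * safe; the final drop in qlt_plogi_ack_unref() sends the deferred NACK
 * and frees the entry.
 */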

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if the LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		res = 0;
		goto out;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
	    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
	    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
	    " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
	    __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
	    sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
	    sess->logout_on_delete, sess->keep_nport_handle,
	    sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			INIT_LIST_HEAD(&logo.list);
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
				sess->logout_completed = 0;
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
			    ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}

		if (ha->flags.edif_enabled &&
		    (!own || (own &&
		      own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
			sess->edif.authok = 0;
			if (!ha->flags.host_shutting_down) {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
				    __func__, sess->port_name);
				qla2x00_release_all_sadb(vha, sess);
			} else {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s bypassing release_all_sadb\n",
				    __func__);
			}

			qla_edif_clear_appdata(vha, sess);
			qla_edif_sess_down(vha, sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			/*
			 * The driver timeout is 22 seconds, so loop long
			 * enough for the logout to complete before advancing.
			 * Otherwise, a straddling logout can interfere with
			 * the re-login attempt.
			 */
			if (cnt > 230)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
		    (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	qla2x00_dfs_remove_rport(vha, sess);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
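
/*
 * Session teardown is two-stage: qlt_unreg_sess() above marks the session
 * free_pending under work_lock and queues free_work, while
 * qlt_free_session_done() performs the actual logout/PRLO, plogi_ack
 * cleanup and counter updates from workqueue context.
 */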

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
		    !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_log_warn, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC fc4_type %x\n",
	    sess, sess->port_name, sess->fc4_type);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow the hw lock to be dropped after adding sess
 * to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;

	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we can still get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
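
/*
 * Target stop is likewise two-phase: qlt_stop_phase1() sets tgt_stop,
 * clears the session DB and waits for sess_count to drain, and
 * qlt_stop_phase2() below flips tgt_stop -> tgt_stopped once the
 * tcm_qla2xxx configfs teardown reaches that point.
 */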

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1709 */
qlt_send_notify_ack(struct qla_qpair * qpair,struct imm_ntfy_from_isp * ntfy,uint32_t add_flags,uint16_t resp_code,int resp_code_valid,uint16_t srr_flags,uint16_t srr_reject_code,uint8_t srr_explan)1710 static void qlt_send_notify_ack(struct qla_qpair *qpair,
1711 struct imm_ntfy_from_isp *ntfy,
1712 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1713 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1714 {
1715 struct scsi_qla_host *vha = qpair->vha;
1716 struct qla_hw_data *ha = vha->hw;
1717 request_t *pkt;
1718 struct nack_to_isp *nack;
1719
1720 if (!ha->flags.fw_started)
1721 return;
1722
1723 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1724
1725 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
1726 if (!pkt) {
1727 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1728 "qla_target(%d): %s failed: unable to allocate "
1729 "request packet\n", vha->vp_idx, __func__);
1730 return;
1731 }
1732
1733 if (vha->vha_tgt.qla_tgt != NULL)
1734 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1735
1736 pkt->entry_type = NOTIFY_ACK_TYPE;
1737 pkt->entry_count = 1;
1738
1739 nack = (struct nack_to_isp *)pkt;
1740 nack->ox_id = ntfy->ox_id;
1741
1742 nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
1743 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1744 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1745 nack->u.isp24.flags = ntfy->u.isp24.flags &
1746 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
1747 }
1748 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1749 nack->u.isp24.status = ntfy->u.isp24.status;
1750 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1751 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
1752 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1753 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1754 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1755 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1756 nack->u.isp24.srr_reject_code = srr_reject_code;
1757 nack->u.isp24.srr_reject_code_expl = srr_explan;
1758 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1759
1760 /* TODO qualify this with EDIF enable */
1761 if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
1762 (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
1763 nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
1764 }
1765
1766 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1767 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1768 vha->vp_idx, nack->u.isp24.status);
1769
1770 /* Memory Barrier */
1771 wmb();
1772 qla2x00_start_iocbs(vha, qpair->req);
1773 }
1774
qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd * mcmd)1775 static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
1776 {
1777 struct scsi_qla_host *vha = mcmd->vha;
1778 struct qla_hw_data *ha = vha->hw;
1779 struct abts_resp_to_24xx *resp;
1780 __le32 f_ctl;
1781 uint32_t h;
1782 uint8_t *p;
1783 int rc;
1784 struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
1785 struct qla_qpair *qpair = mcmd->qpair;
1786
1787 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1788 "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
1789 ha, mcmd->fc_tm_rsp);
1790
1791 rc = qlt_check_reserve_free_req(qpair, 1);
1792 if (rc) {
1793 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1794 "qla_target(%d): %s failed: unable to allocate request packet\n",
1795 vha->vp_idx, __func__);
1796 return -EAGAIN;
1797 }
1798
1799 resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
1800 memset(resp, 0, sizeof(*resp));
1801
1802 h = qlt_make_handle(qpair);
1803 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1804 /*
1805 * CTIO type 7 from the firmware doesn't provide a way to
1806 * know the initiator's LOOP ID, hence we can't find
1807 * the session and, therefore, the command.
1808 */
1809 return -EAGAIN;
1810 } else {
1811 qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
1812 }
1813
1814 resp->handle = make_handle(qpair->req->id, h);
1815 resp->entry_type = ABTS_RESP_24XX;
1816 resp->entry_count = 1;
1817 resp->nport_handle = abts->nport_handle;
1818 resp->vp_index = vha->vp_idx;
1819 resp->sof_type = abts->sof_type;
1820 resp->exchange_address = abts->exchange_address;
1821 resp->fcp_hdr_le = abts->fcp_hdr_le;
1822 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1823 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1824 F_CTL_SEQ_INITIATIVE);
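/*
 * f_ctl is a 24-bit field: copy the three low-order bytes of the
 * little-endian value into the wire-format header.
 */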
1825 p = (uint8_t *)&f_ctl;
1826 resp->fcp_hdr_le.f_ctl[0] = *p++;
1827 resp->fcp_hdr_le.f_ctl[1] = *p++;
1828 resp->fcp_hdr_le.f_ctl[2] = *p;
1829
1830 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1831 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1832
1833 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1834 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
1835 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1836 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1837 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1838 resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
1839 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1840 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1841 } else {
1842 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1843 resp->payload.ba_rjt.reason_code =
1844 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1845 /* Other bytes are zero */
1846 }
1847
1848 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1849
1850 /* Memory Barrier */
1851 wmb();
1852 if (qpair->reqq_start_iocbs)
1853 qpair->reqq_start_iocbs(qpair);
1854 else
1855 qla2x00_start_iocbs(vha, qpair->req);
1856
1857 return rc;
1858 }
1859
1860 /*
1861 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
1862 */
1863 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1864 struct abts_recv_from_24xx *abts, uint32_t status,
1865 bool ids_reversed)
1866 {
1867 struct scsi_qla_host *vha = qpair->vha;
1868 struct qla_hw_data *ha = vha->hw;
1869 struct abts_resp_to_24xx *resp;
1870 __le32 f_ctl;
1871 uint8_t *p;
1872
1873 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1874 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1875 ha, abts, status);
1876
1877 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
1878 NULL);
1879 if (!resp) {
1880 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1881 "qla_target(%d): %s failed: unable to allocate "
1882 "request packet", vha->vp_idx, __func__);
1883 return;
1884 }
1885
1886 resp->entry_type = ABTS_RESP_24XX;
1887 resp->handle = QLA_TGT_SKIP_HANDLE;
1888 resp->entry_count = 1;
1889 resp->nport_handle = abts->nport_handle;
1890 resp->vp_index = vha->vp_idx;
1891 resp->sof_type = abts->sof_type;
1892 resp->exchange_address = abts->exchange_address;
1893 resp->fcp_hdr_le = abts->fcp_hdr_le;
1894 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1895 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1896 F_CTL_SEQ_INITIATIVE);
1897 p = (uint8_t *)&f_ctl;
1898 resp->fcp_hdr_le.f_ctl[0] = *p++;
1899 resp->fcp_hdr_le.f_ctl[1] = *p++;
1900 resp->fcp_hdr_le.f_ctl[2] = *p;
1901 if (ids_reversed) {
1902 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1903 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1904 } else {
1905 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1906 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1907 }
1908 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1909 if (status == FCP_TMF_CMPL) {
1910 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1911 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1912 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1913 resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
1914 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1915 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1916 } else {
1917 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1918 resp->payload.ba_rjt.reason_code =
1919 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1920 /* Other bytes are zero */
1921 }
1922
1923 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1924
1925 /* Memory Barrier */
1926 wmb();
1927 if (qpair->reqq_start_iocbs)
1928 qpair->reqq_start_iocbs(qpair);
1929 else
1930 qla2x00_start_iocbs(vha, qpair->req);
1931 }
1932
1933 /*
1934 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
1935 */
1936 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1937 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1938 {
1939 struct ctio7_to_24xx *ctio;
1940 u16 tmp;
1941 struct abts_recv_from_24xx *entry;
1942
1943 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1944 if (ctio == NULL) {
1945 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1946 "qla_target(%d): %s failed: unable to allocate "
1947 "request packet\n", vha->vp_idx, __func__);
1948 return;
1949 }
1950
1951 if (mcmd)
1952 /* abts from remote port */
1953 entry = &mcmd->orig_iocb.abts;
1954 else
1955 /* abts from this driver. */
1956 entry = (struct abts_recv_from_24xx *)pkt;
1957
1958 /*
1959 * On entry we have the firmware's response to an ABTS response that
1960 * we ourselves generated, so the ID fields in it are reversed.
1961 */
1962
1963 ctio->entry_type = CTIO_TYPE7;
1964 ctio->entry_count = 1;
1965 ctio->nport_handle = entry->nport_handle;
1966 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1967 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1968 ctio->vp_index = vha->vp_idx;
1969 ctio->exchange_addr = entry->exchange_addr_to_abort;
1970 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1971
1972 if (mcmd) {
1973 ctio->initiator_id = entry->fcp_hdr_le.s_id;
1974
1975 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1976 tmp |= (mcmd->abort_io_attr << 9);
1977 else if (qpair->retry_term_cnt & 1)
1978 tmp |= (0x4 << 9);
1979 } else {
1980 ctio->initiator_id = entry->fcp_hdr_le.d_id;
1981
1982 if (qpair->retry_term_cnt & 1)
1983 tmp |= (0x4 << 9);
1984 }
1985 ctio->u.status1.flags = cpu_to_le16(tmp);
1986 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1987
1988 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1989 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1990 le16_to_cpu(ctio->u.status1.flags),
1991 le16_to_cpu(ctio->u.status1.ox_id),
1992 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1993
1994 /* Memory Barrier */
1995 wmb();
1996 if (qpair->reqq_start_iocbs)
1997 qpair->reqq_start_iocbs(qpair);
1998 else
1999 qla2x00_start_iocbs(vha, qpair->req);
2000
2001 if (mcmd)
2002 qlt_build_abts_resp_iocb(mcmd);
2003 else
2004 qlt_24xx_send_abts_resp(qpair,
2005 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
2006
2007 }
2008
2009 /* drop cmds for the given lun
2010 * XXX only looks for cmds on the port through which the lun reset was received
2011 * XXX does not go through the lists of other ports (which may have cmds
2012 * for the same lun)
2013 */
2014 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
2015 {
2016 struct qla_tgt_sess_op *op;
2017 struct qla_tgt_cmd *cmd;
2018 uint32_t key;
2019 unsigned long flags;
2020
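/* The key packs the 24-bit FC source ID (domain/area/al_pa) into a u32. */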
2021 key = sid_to_key(s_id);
2022 spin_lock_irqsave(&vha->cmd_list_lock, flags);
2023 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
2024 uint32_t op_key;
2025 u64 op_lun;
2026
2027 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
2028 op_lun = scsilun_to_int(
2029 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
2030 if (op_key == key && op_lun == lun)
2031 op->aborted = true;
2032 }
2033
2034 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2035 uint32_t cmd_key;
2036 u64 cmd_lun;
2037
2038 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
2039 cmd_lun = scsilun_to_int(
2040 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
2041 if (cmd_key == key && cmd_lun == lun)
2042 cmd->aborted = 1;
2043 }
2044 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2045 }
2046
2047 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2048 uint64_t unpacked_lun)
2049 {
2050 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2051 struct qla_qpair_hint *h = NULL;
2052
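/*
 * When qpairs are available, use the qpair previously cached for this
 * LUN; otherwise fall back to the base qpair hint.
 */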
2053 if (vha->flags.qpairs_available) {
2054 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2055 if (!h)
2056 h = &tgt->qphints[0];
2057 } else {
2058 h = &tgt->qphints[0];
2059 }
2060
2061 return h;
2062 }
2063
2064 static void qlt_do_tmr_work(struct work_struct *work)
2065 {
2066 struct qla_tgt_mgmt_cmd *mcmd =
2067 container_of(work, struct qla_tgt_mgmt_cmd, work);
2068 struct qla_hw_data *ha = mcmd->vha->hw;
2069 int rc;
2070 uint32_t tag;
2071 unsigned long flags;
2072
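/*
 * Only an ABTS identifies the command to abort by exchange address;
 * the other task management functions are addressed by LUN alone.
 */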
2073 switch (mcmd->tmr_func) {
2074 case QLA_TGT_ABTS:
2075 tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
2076 break;
2077 default:
2078 tag = 0;
2079 break;
2080 }
2081
2082 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2083 mcmd->tmr_func, tag);
2084
2085 if (rc != 0) {
2086 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
2087 switch (mcmd->tmr_func) {
2088 case QLA_TGT_ABTS:
2089 mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
2090 qlt_build_abts_resp_iocb(mcmd);
2091 break;
2092 case QLA_TGT_LUN_RESET:
2093 case QLA_TGT_CLEAR_TS:
2094 case QLA_TGT_ABORT_TS:
2095 case QLA_TGT_CLEAR_ACA:
2096 case QLA_TGT_TARGET_RESET:
2097 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
2098 qla_sam_status);
2099 break;
2100
2101 case QLA_TGT_ABORT_ALL:
2102 case QLA_TGT_NEXUS_LOSS_SESS:
2103 case QLA_TGT_NEXUS_LOSS:
2104 qlt_send_notify_ack(mcmd->qpair,
2105 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2106 break;
2107 }
2108 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
2109
2110 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2111 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2112 mcmd->vha->vp_idx, rc);
2113 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2114 }
2115 }
2116
2117 /* ha->hardware_lock is supposed to be held on entry */
2118 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2119 struct abts_recv_from_24xx *abts, struct fc_port *sess)
2120 {
2121 struct qla_hw_data *ha = vha->hw;
2122 struct qla_tgt_mgmt_cmd *mcmd;
2123 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2124 struct qla_tgt_cmd *abort_cmd;
2125
2126 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2127 "qla_target(%d): task abort (tag=%d)\n",
2128 vha->vp_idx, abts->exchange_addr_to_abort);
2129
2130 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2131 if (mcmd == NULL) {
2132 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2133 "qla_target(%d): %s: Allocation of ABORT cmd failed",
2134 vha->vp_idx, __func__);
2135 return -ENOMEM;
2136 }
2137 memset(mcmd, 0, sizeof(*mcmd));
2138 mcmd->cmd_type = TYPE_TGT_TMCMD;
2139 mcmd->sess = sess;
2140 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2141 mcmd->reset_count = ha->base_qpair->chip_reset;
2142 mcmd->tmr_func = QLA_TGT_ABTS;
2143 mcmd->qpair = h->qpair;
2144 mcmd->vha = vha;
2145
2146 /*
2147 * LUN is looked up by target-core internally based on the passed
2148 * abts->exchange_addr_to_abort tag.
2149 */
2150 mcmd->se_cmd.cpuid = h->cpuid;
2151
2152 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2153 le32_to_cpu(abts->exchange_addr_to_abort));
2154 if (!abort_cmd)
2155 return -EIO;
2156 mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
2157
2158 if (abort_cmd->qpair) {
2159 mcmd->qpair = abort_cmd->qpair;
2160 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2161 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2162 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2163 }
2164
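/* Run the TMF handler on the CPU associated with the command's qpair. */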
2165 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
2166 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
2167
2168 return 0;
2169 }
2170
2171 /*
2172 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2173 */
2174 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2175 struct abts_recv_from_24xx *abts)
2176 {
2177 struct qla_hw_data *ha = vha->hw;
2178 struct fc_port *sess;
2179 uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
2180 be_id_t s_id;
2181 int rc;
2182 unsigned long flags;
2183
2184 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2185 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2186 "qla_target(%d): ABTS: Abort Sequence not "
2187 "supported\n", vha->vp_idx);
2188 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2189 false);
2190 return;
2191 }
2192
2193 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2195 "qla_target(%d): ABTS: Unknown Exchange "
2196 "Address received\n", vha->vp_idx);
2197 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2198 false);
2199 return;
2200 }
2201
2202 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2203 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2204 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2205 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2206 le32_to_cpu(abts->fcp_hdr_le.parameter));
2207
2208 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2209
2210 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2211 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2212 if (!sess) {
2213 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2214 "qla_target(%d): task abort for non-existent session\n",
2215 vha->vp_idx);
2216 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2217
2218 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2219 false);
2220 return;
2221 }
2222 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2223
2225 if (sess->deleted) {
2226 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2227 false);
2228 return;
2229 }
2230
2231 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2232 if (rc != 0) {
2233 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2234 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2235 vha->vp_idx, rc);
2236 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2237 false);
2238 return;
2239 }
2240 }
2241
2242 /*
2243 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2244 */
2245 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2246 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2247 {
2248 struct scsi_qla_host *ha = mcmd->vha;
2249 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2250 struct ctio7_to_24xx *ctio;
2251 uint16_t temp;
2252
2253 ql_dbg(ql_dbg_tgt, ha, 0xe008,
2254 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
2255 ha, atio, resp_code);
2256
2258 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2259 if (ctio == NULL) {
2260 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2261 "qla_target(%d): %s failed: unable to allocate "
2262 "request packet\n", ha->vp_idx, __func__);
2263 return;
2264 }
2265
2266 ctio->entry_type = CTIO_TYPE7;
2267 ctio->entry_count = 1;
2268 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2269 ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
2270 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2271 ctio->vp_index = ha->vp_idx;
2272 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2273 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2274 temp = (atio->u.isp24.attr << 9)|
2275 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2276 ctio->u.status1.flags = cpu_to_le16(temp);
2277 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2278 ctio->u.status1.ox_id = cpu_to_le16(temp);
2279 ctio->u.status1.scsi_status =
2280 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2281 ctio->u.status1.response_len = cpu_to_le16(8);
2282 ctio->u.status1.sense_data[0] = resp_code;
2283
2284 /* Memory Barrier */
2285 wmb();
2286 if (qpair->reqq_start_iocbs)
2287 qpair->reqq_start_iocbs(qpair);
2288 else
2289 qla2x00_start_iocbs(ha, qpair->req);
2290 }
2291
2292 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2293 {
2294 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2295 }
2296 EXPORT_SYMBOL(qlt_free_mcmd);
2297
2298 /*
2299 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
2300 * reacquire
2301 */
2302 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2303 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2304 {
2305 struct atio_from_isp *atio = &cmd->atio;
2306 struct ctio7_to_24xx *ctio;
2307 uint16_t temp;
2308 struct scsi_qla_host *vha = cmd->vha;
2309
2310 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2311 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2312 "sense_key=%02x, asc=%02x, ascq=%02x",
2313 vha, atio, scsi_status, sense_key, asc, ascq);
2314
2315 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2316 if (!ctio) {
2317 ql_dbg(ql_dbg_async, vha, 0x3067,
2318 "qla2x00t(%ld): %s failed: unable to allocate request packet",
2319 vha->host_no, __func__);
2320 goto out;
2321 }
2322
2323 ctio->entry_type = CTIO_TYPE7;
2324 ctio->entry_count = 1;
2325 ctio->handle = QLA_TGT_SKIP_HANDLE;
2326 ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
2327 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2328 ctio->vp_index = vha->vp_idx;
2329 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2330 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2331 temp = (atio->u.isp24.attr << 9) |
2332 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2333 ctio->u.status1.flags = cpu_to_le16(temp);
2334 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2335 ctio->u.status1.ox_id = cpu_to_le16(temp);
2336 ctio->u.status1.scsi_status =
2337 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2338 ctio->u.status1.response_len = cpu_to_le16(18);
2339 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2340
2341 if (ctio->u.status1.residual != 0)
2342 ctio->u.status1.scsi_status |=
2343 cpu_to_le16(SS_RESIDUAL_UNDER);
2344
2345 /* Fixed format sense data. */
2346 ctio->u.status1.sense_data[0] = 0x70;
2347 ctio->u.status1.sense_data[2] = sense_key;
2348 /* Additional sense length */
2349 ctio->u.status1.sense_data[7] = 0xa;
2350 /* ASC and ASCQ */
2351 ctio->u.status1.sense_data[12] = asc;
2352 ctio->u.status1.sense_data[13] = ascq;
2353
2354 /* Memory Barrier */
2355 wmb();
2356
2357 if (qpair->reqq_start_iocbs)
2358 qpair->reqq_start_iocbs(qpair);
2359 else
2360 qla2x00_start_iocbs(vha, qpair->req);
2361
2362 out:
2363 return;
2364 }
2365
2366 /* callback from target fabric module code */
2367 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2368 {
2369 struct scsi_qla_host *vha = mcmd->sess->vha;
2370 struct qla_hw_data *ha = vha->hw;
2371 unsigned long flags;
2372 struct qla_qpair *qpair = mcmd->qpair;
2373 bool free_mcmd = true;
2374
2375 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2376 "TM response mcmd (%p) status %#x state %#x",
2377 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
2378
2379 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2380
2381 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2382 /*
2383 * Either the port is not online or this request was from
2384 * previous life, just abort the processing.
2385 */
2386 ql_dbg(ql_dbg_async, vha, 0xe100,
2387 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2388 vha->flags.online, qla2x00_reset_active(vha),
2389 mcmd->reset_count, qpair->chip_reset);
2390 ha->tgt.tgt_ops->free_mcmd(mcmd);
2391 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2392 return;
2393 }
2394
2395 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
2396 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2397 case ELS_LOGO:
2398 case ELS_PRLO:
2399 case ELS_TPRLO:
2400 ql_dbg(ql_dbg_disc, vha, 0x2106,
2401 "TM response logo %8phC status %#x state %#x",
2402 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2403 mcmd->flags);
2404 qlt_schedule_sess_for_deletion(mcmd->sess);
2405 break;
2406 default:
2407 qlt_send_notify_ack(vha->hw->base_qpair,
2408 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2409 break;
2410 }
2411 } else {
2412 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2413 qlt_build_abts_resp_iocb(mcmd);
2414 free_mcmd = false;
2415 } else
2416 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2417 mcmd->fc_tm_rsp);
2418 }
2419 /*
2420 * The ->free_mcmd() callback queues work that invokes
2421 * target_put_sess_cmd() to drop cmd_kref to 1. The final
2422 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
2423 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
2424 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
2425 * qlt_xmit_tm_rsp() returns here.
2426 */
2427 if (free_mcmd)
2428 ha->tgt.tgt_ops->free_mcmd(mcmd);
2429
2430 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2431 }
2432 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2433
2434 /* No locks */
2435 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2436 {
2437 struct qla_tgt_cmd *cmd = prm->cmd;
2438
2439 BUG_ON(cmd->sg_cnt == 0);
2440
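/*
 * DMA-map the data scatterlist (and, for DIF, the protection
 * scatterlist) and work out how many request-queue entries the
 * resulting data segment descriptors will consume.
 */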
2441 prm->sg = (struct scatterlist *)cmd->sg;
2442 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2443 cmd->sg_cnt, cmd->dma_data_direction);
2444 if (unlikely(prm->seg_cnt == 0))
2445 goto out_err;
2446
2447 prm->cmd->sg_mapped = 1;
2448
2449 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
2450 /*
2451 * If there are more than four sg entries then we need to allocate
2452 * the continuation entries
2453 */
2454 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
2455 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
2456 QLA_TGT_DATASEGS_PER_CMD_24XX,
2457 QLA_TGT_DATASEGS_PER_CONT_24XX);
2458 } else {
2459 /* DIF */
2460 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2461 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2462 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2463 prm->tot_dsds = prm->seg_cnt;
2464 } else
2465 prm->tot_dsds = prm->seg_cnt;
2466
2467 if (cmd->prot_sg_cnt) {
2468 prm->prot_sg = cmd->prot_sg;
2469 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2470 cmd->prot_sg, cmd->prot_sg_cnt,
2471 cmd->dma_data_direction);
2472 if (unlikely(prm->prot_seg_cnt == 0))
2473 goto out_err;
2474
2475 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2476 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2477 /* DIF bundling is not supported here */
2478 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2479 cmd->blk_sz);
2480 prm->tot_dsds += prm->prot_seg_cnt;
2481 } else
2482 prm->tot_dsds += prm->prot_seg_cnt;
2483 }
2484 }
2485
2486 return 0;
2487
2488 out_err:
2489 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2490 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
2491 0, prm->cmd->sg_cnt);
2492 return -1;
2493 }
2494
2495 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2496 {
2497 struct qla_hw_data *ha;
2498 struct qla_qpair *qpair;
2499
2500 if (!cmd->sg_mapped)
2501 return;
2502
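/* Undo the mappings made by qlt_pci_map_calc_cnt() and free any DIF context. */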
2503 qpair = cmd->qpair;
2504
2505 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2506 cmd->dma_data_direction);
2507 cmd->sg_mapped = 0;
2508
2509 if (cmd->prot_sg_cnt)
2510 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2511 cmd->dma_data_direction);
2512
2513 if (!cmd->ctx)
2514 return;
2515 ha = vha->hw;
2516 if (cmd->ctx_dsd_alloced)
2517 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2518
2519 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2520 }
2521
2522 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2523 uint32_t req_cnt)
2524 {
2525 uint32_t cnt;
2526 struct req_que *req = qpair->req;
2527
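/*
 * Refresh the cached free-entry count from the firmware's out pointer
 * when it looks too small; two entries are always kept in reserve so
 * the ring never appears completely full.
 */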
2528 if (req->cnt < (req_cnt + 2)) {
2529 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2530 rd_reg_dword_relaxed(req->req_q_out));
2531
2532 if (req->ring_index < cnt)
2533 req->cnt = cnt - req->ring_index;
2534 else
2535 req->cnt = req->length - (req->ring_index - cnt);
2536
2537 if (unlikely(req->cnt < (req_cnt + 2)))
2538 return -EAGAIN;
2539 }
2540
2541 req->cnt -= req_cnt;
2542
2543 return 0;
2544 }
2545
2546 /*
2547 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2548 */
2549 static inline void *qlt_get_req_pkt(struct req_que *req)
2550 {
2551 /* Adjust ring index. */
2552 req->ring_index++;
2553 if (req->ring_index == req->length) {
2554 req->ring_index = 0;
2555 req->ring_ptr = req->ring;
2556 } else {
2557 req->ring_ptr++;
2558 }
2559 return (cont_entry_t *)req->ring_ptr;
2560 }
2561
2562 /* ha->hardware_lock is supposed to be held on entry */
2563 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2564 {
2565 uint32_t h;
2566 int index;
2567 uint8_t found = 0;
2568 struct req_que *req = qpair->req;
2569
2570 h = req->current_outstanding_cmd;
2571
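/*
 * Circularly scan the outstanding command array, starting just past
 * the most recently used handle, for a free slot that is not the
 * reserved QLA_TGT_SKIP_HANDLE value.
 */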
2572 for (index = 1; index < req->num_outstanding_cmds; index++) {
2573 h++;
2574 if (h == req->num_outstanding_cmds)
2575 h = 1;
2576
2577 if (h == QLA_TGT_SKIP_HANDLE)
2578 continue;
2579
2580 if (!req->outstanding_cmds[h]) {
2581 found = 1;
2582 break;
2583 }
2584 }
2585
2586 if (found) {
2587 req->current_outstanding_cmd = h;
2588 } else {
2589 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2590 "qla_target(%d): Ran out of empty cmd slots\n",
2591 qpair->vha->vp_idx);
2592 h = QLA_TGT_NULL_HANDLE;
2593 }
2594
2595 return h;
2596 }
2597
2598 /* ha->hardware_lock is supposed to be held on entry */
2599 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2600 struct qla_tgt_prm *prm)
2601 {
2602 uint32_t h;
2603 struct ctio7_to_24xx *pkt;
2604 struct atio_from_isp *atio = &prm->cmd->atio;
2605 uint16_t temp;
2606 struct qla_tgt_cmd *cmd = prm->cmd;
2607
2608 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
2609 prm->pkt = pkt;
2610 memset(pkt, 0, sizeof(*pkt));
2611
2612 pkt->entry_type = CTIO_TYPE7;
2613 pkt->entry_count = (uint8_t)prm->req_cnt;
2614 pkt->vp_index = prm->cmd->vp_idx;
2615
2616 h = qlt_make_handle(qpair);
2617 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2618 /*
2619 * CTIO type 7 from the firmware doesn't provide a way to
2620 * know the initiator's LOOP ID, hence we can't find
2621 * the session and, therefore, the command.
2622 */
2623 return -EAGAIN;
2624 } else
2625 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2626
2627 pkt->handle = make_handle(qpair->req->id, h);
2628 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2629 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2630 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2631 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2632 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2633 temp = atio->u.isp24.attr << 9;
2634 pkt->u.status0.flags |= cpu_to_le16(temp);
2635 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2636 pkt->u.status0.ox_id = cpu_to_le16(temp);
2637 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2638
2639 if (cmd->edif) {
2640 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2641 prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
2642 if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2643 prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
2644
2645 pkt->u.status0.edif_flags |= EF_EN_EDIF;
2646 }
2647
2648 return 0;
2649 }
2650
2651 /*
2652 * ha->hardware_lock is supposed to be held on entry. We have already made sure
2653 * that there is a sufficient number of request entries so that we do not drop it.
2654 */
2655 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2656 {
2657 int cnt;
2658 struct dsd64 *cur_dsd;
2659
2660 /* Build continuation packets */
2661 while (prm->seg_cnt > 0) {
2662 cont_a64_entry_t *cont_pkt64 =
2663 (cont_a64_entry_t *)qlt_get_req_pkt(
2664 prm->cmd->qpair->req);
2665
2666 /*
2667 * Make sure that none of cont_pkt64's 64-bit-specific
2668 * fields are used for 32-bit addressing; cast to
2669 * (cont_entry_t *) for that.
2671 */
2672
2673 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2674
2675 cont_pkt64->entry_count = 1;
2676 cont_pkt64->sys_define = 0;
2677
2678 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2679 cur_dsd = cont_pkt64->dsd;
2680
2681 /* Load continuation entry data segments */
2682 for (cnt = 0;
2683 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2684 cnt++, prm->seg_cnt--) {
2685 append_dsd64(&cur_dsd, prm->sg);
2686 prm->sg = sg_next(prm->sg);
2687 }
2688 }
2689 }
2690
2691 /*
2692 * ha->hardware_lock is supposed to be held on entry. We have already made sure
2693 * that there is a sufficient number of request entries so that we do not drop it.
2694 */
2695 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2696 {
2697 int cnt;
2698 struct dsd64 *cur_dsd;
2699 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2700
2701 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2702
2703 /* Setup packet address segment pointer */
2704 cur_dsd = &pkt24->u.status0.dsd;
2705
2706 /* Set total data segment count */
2707 if (prm->seg_cnt)
2708 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2709
2710 if (prm->seg_cnt == 0) {
2711 /* No data transfer */
2712 cur_dsd->address = 0;
2713 cur_dsd->length = 0;
2714 return;
2715 }
2716
2717 /* If scatter gather */
2718
2719 /* Load command entry data segments */
2720 for (cnt = 0;
2721 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2722 cnt++, prm->seg_cnt--) {
2723 append_dsd64(&cur_dsd, prm->sg);
2724 prm->sg = sg_next(prm->sg);
2725 }
2726
2727 qlt_load_cont_data_segments(prm);
2728 }
2729
2730 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2731 {
2732 return cmd->bufflen > 0;
2733 }
2734
2735 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2736 {
2737 struct qla_tgt_cmd *cmd;
2738 struct scsi_qla_host *vha;
2739
2740 /* asc 0x10=dif error */
2741 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2742 cmd = prm->cmd;
2743 vha = cmd->vha;
2744 /* ASCQ */
2745 switch (prm->sense_buffer[13]) {
2746 case 1:
2747 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2748 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2749 "se_cmd=%p tag[%x]",
2750 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2751 cmd->atio.u.isp24.exchange_addr);
2752 break;
2753 case 2:
2754 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2755 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2756 "se_cmd=%p tag[%x]",
2757 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2758 cmd->atio.u.isp24.exchange_addr);
2759 break;
2760 case 3:
2761 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2762 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2763 "se_cmd=%p tag[%x]",
2764 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2765 cmd->atio.u.isp24.exchange_addr);
2766 break;
2767 default:
2768 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2769 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2770 "se_cmd=%p tag[%x]",
2771 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2772 cmd->atio.u.isp24.exchange_addr);
2773 break;
2774 }
2775 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2776 }
2777 }
2778
2779 /*
2780 * Called without ha->hardware_lock held
2781 */
2782 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2783 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2784 uint32_t *full_req_cnt)
2785 {
2786 struct se_cmd *se_cmd = &cmd->se_cmd;
2787 struct qla_qpair *qpair = cmd->qpair;
2788
2789 prm->cmd = cmd;
2790 prm->tgt = cmd->tgt;
2791 prm->pkt = NULL;
2792 prm->rq_result = scsi_status;
2793 prm->sense_buffer = &cmd->sense_buffer[0];
2794 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2795 prm->sg = NULL;
2796 prm->seg_cnt = -1;
2797 prm->req_cnt = 1;
2798 prm->residual = 0;
2799 prm->add_status_pkt = 0;
2800 prm->prot_sg = NULL;
2801 prm->prot_seg_cnt = 0;
2802 prm->tot_dsds = 0;
2803
2804 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2805 if (qlt_pci_map_calc_cnt(prm) != 0)
2806 return -EAGAIN;
2807 }
2808
2809 *full_req_cnt = prm->req_cnt;
2810
2811 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2812 prm->residual = se_cmd->residual_count;
2813 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2814 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2815 prm->residual, se_cmd->tag,
2816 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2817 cmd->bufflen, prm->rq_result);
2818 prm->rq_result |= SS_RESIDUAL_UNDER;
2819 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2820 prm->residual = se_cmd->residual_count;
2821 ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2822 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2823 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2824 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2825 prm->rq_result |= SS_RESIDUAL_OVER;
2826 }
2827
2828 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2829 /*
2830 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2831 * ignored in *xmit_response() below
2832 */
2833 if (qlt_has_data(cmd)) {
2834 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2835 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2836 (prm->rq_result != 0))) {
2837 prm->add_status_pkt = 1;
2838 (*full_req_cnt)++;
2839 }
2840 }
2841 }
2842
2843 return 0;
2844 }
2845
2846 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2847 int sending_sense)
2848 {
2849 if (cmd->qpair->enable_class_2)
2850 return 0;
2851
2852 if (sending_sense)
2853 return cmd->conf_compl_supported;
2854 else
2855 return cmd->qpair->enable_explicit_conf &&
2856 cmd->conf_compl_supported;
2857 }
2858
2859 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2860 struct qla_tgt_prm *prm)
2861 {
2862 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2863 (uint32_t)sizeof(ctio->u.status1.sense_data));
2864 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2865 if (qlt_need_explicit_conf(prm->cmd, 0)) {
2866 ctio->u.status0.flags |= cpu_to_le16(
2867 CTIO7_FLAGS_EXPLICIT_CONFORM |
2868 CTIO7_FLAGS_CONFORM_REQ);
2869 }
2870 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2871 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2872 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2873 int i;
2874
2875 if (qlt_need_explicit_conf(prm->cmd, 1)) {
2876 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2877 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2878 "Skipping EXPLICIT_CONFORM and "
2879 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2880 "non GOOD status\n");
2881 goto skip_explict_conf;
2882 }
2883 ctio->u.status1.flags |= cpu_to_le16(
2884 CTIO7_FLAGS_EXPLICIT_CONFORM |
2885 CTIO7_FLAGS_CONFORM_REQ);
2886 }
2887 skip_explict_conf:
2888 ctio->u.status1.flags &=
2889 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2890 ctio->u.status1.flags |=
2891 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2892 ctio->u.status1.scsi_status |=
2893 cpu_to_le16(SS_SENSE_LEN_VALID);
2894 ctio->u.status1.sense_length =
2895 cpu_to_le16(prm->sense_buffer_len);
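/*
 * The firmware expects the sense bytes swapped within each 32-bit
 * word, hence the big-endian load / little-endian store below.
 */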
2896 for (i = 0; i < prm->sense_buffer_len/4; i++) {
2897 uint32_t v;
2898
2899 v = get_unaligned_be32(
2900 &((uint32_t *)prm->sense_buffer)[i]);
2901 put_unaligned_le32(v,
2902 &((uint32_t *)ctio->u.status1.sense_data)[i]);
2903 }
2904 qlt_print_dif_err(prm);
2905
2906 } else {
2907 ctio->u.status1.flags &=
2908 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2909 ctio->u.status1.flags |=
2910 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2911 ctio->u.status1.sense_length = 0;
2912 memset(ctio->u.status1.sense_data, 0,
2913 sizeof(ctio->u.status1.sense_data));
2914 }
2915
2916 /* Sense with len > 24: is that possible? */
2917 }
2918
2919 static inline int
2920 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2921 {
2922 switch (se_cmd->prot_op) {
2923 case TARGET_PROT_DOUT_INSERT:
2924 case TARGET_PROT_DIN_STRIP:
2925 if (ql2xenablehba_err_chk >= 1)
2926 return 1;
2927 break;
2928 case TARGET_PROT_DOUT_PASS:
2929 case TARGET_PROT_DIN_PASS:
2930 if (ql2xenablehba_err_chk >= 2)
2931 return 1;
2932 break;
2933 case TARGET_PROT_DIN_INSERT:
2934 case TARGET_PROT_DOUT_STRIP:
2935 return 1;
2936 default:
2937 break;
2938 }
2939 return 0;
2940 }
2941
2942 static inline int
2943 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2944 {
2945 switch (se_cmd->prot_op) {
2946 case TARGET_PROT_DIN_INSERT:
2947 case TARGET_PROT_DOUT_INSERT:
2948 case TARGET_PROT_DIN_STRIP:
2949 case TARGET_PROT_DOUT_STRIP:
2950 case TARGET_PROT_DIN_PASS:
2951 case TARGET_PROT_DOUT_PASS:
2952 return 1;
2953 default:
2954 return 0;
2955 }
2956 return 0;
2957 }
2958
2959 /*
2960 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2961 */
2962 static void
2963 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2964 uint16_t *pfw_prot_opts)
2965 {
2966 struct se_cmd *se_cmd = &cmd->se_cmd;
2967 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2968 scsi_qla_host_t *vha = cmd->tgt->vha;
2969 struct qla_hw_data *ha = vha->hw;
2970 uint32_t t32 = 0;
2971
2972 /*
2973 * Wait until the Mode Sense/Select commands (mode page 0Ah,
2974 * subpage 2) have been implemented by TCM before the app tag
2975 * becomes available. Look for modesense_handlers[].
2976 */
2977 ctx->app_tag = 0;
2978 ctx->app_tag_mask[0] = 0x0;
2979 ctx->app_tag_mask[1] = 0x0;
2980
2981 if (IS_PI_UNINIT_CAPABLE(ha)) {
2982 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2983 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2984 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2985 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2986 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2987 }
2988
2989 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2990
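/*
 * For each DIF type, decide whether the 32-bit ref tag must match the
 * LBA and which of its bytes the firmware should actually check.
 */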
2991 switch (se_cmd->prot_type) {
2992 case TARGET_DIF_TYPE0_PROT:
2993 /*
2994 * No check for ql2xenablehba_err_chk, as it
2995 * would be an I/O error if hba tag generation
2996 * is not done.
2997 */
2998 ctx->ref_tag = cpu_to_le32(lba);
2999 /* enable ALL bytes of the ref tag */
3000 ctx->ref_tag_mask[0] = 0xff;
3001 ctx->ref_tag_mask[1] = 0xff;
3002 ctx->ref_tag_mask[2] = 0xff;
3003 ctx->ref_tag_mask[3] = 0xff;
3004 break;
3005 case TARGET_DIF_TYPE1_PROT:
3006 /*
3007 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
3008 * REF tag, and 16 bit app tag.
3009 */
3010 ctx->ref_tag = cpu_to_le32(lba);
3011 if (!qla_tgt_ref_mask_check(se_cmd) ||
3012 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
3013 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
3014 break;
3015 }
3016 /* enable ALL bytes of the ref tag */
3017 ctx->ref_tag_mask[0] = 0xff;
3018 ctx->ref_tag_mask[1] = 0xff;
3019 ctx->ref_tag_mask[2] = 0xff;
3020 ctx->ref_tag_mask[3] = 0xff;
3021 break;
3022 case TARGET_DIF_TYPE2_PROT:
3023 /*
3024 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
3025 * tag has to match LBA in CDB + N
3026 */
3027 ctx->ref_tag = cpu_to_le32(lba);
3028 if (!qla_tgt_ref_mask_check(se_cmd) ||
3029 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
3030 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
3031 break;
3032 }
3033 /* enable ALL bytes of the ref tag */
3034 ctx->ref_tag_mask[0] = 0xff;
3035 ctx->ref_tag_mask[1] = 0xff;
3036 ctx->ref_tag_mask[2] = 0xff;
3037 ctx->ref_tag_mask[3] = 0xff;
3038 break;
3039 case TARGET_DIF_TYPE3_PROT:
3040 /* For TYPE 3 protection: 16 bit GUARD only */
3041 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
3042 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
3043 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
3044 break;
3045 }
3046 }
3047
3048 static inline int
3049 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3050 {
3051 struct dsd64 *cur_dsd;
3052 uint32_t transfer_length = 0;
3053 uint32_t data_bytes;
3054 uint32_t dif_bytes;
3055 uint8_t bundling = 1;
3056 struct crc_context *crc_ctx_pkt = NULL;
3057 struct qla_hw_data *ha;
3058 struct ctio_crc2_to_fw *pkt;
3059 dma_addr_t crc_ctx_dma;
3060 uint16_t fw_prot_opts = 0;
3061 struct qla_tgt_cmd *cmd = prm->cmd;
3062 struct se_cmd *se_cmd = &cmd->se_cmd;
3063 uint32_t h;
3064 struct atio_from_isp *atio = &prm->cmd->atio;
3065 struct qla_tc_param tc;
3066 uint16_t t16;
3067 scsi_qla_host_t *vha = cmd->vha;
3068
3069 ha = vha->hw;
3070
3071 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
3072 prm->pkt = pkt;
3073 memset(pkt, 0, sizeof(*pkt));
3074
3075 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3076 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3077 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3078 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
3079
3080 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
3081 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
3082 bundling = 0;
3083
3084 /* Compute the DIF length and adjust the data length to include protection */
3085 data_bytes = cmd->bufflen;
3086 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
3087
3088 switch (se_cmd->prot_op) {
3089 case TARGET_PROT_DIN_INSERT:
3090 case TARGET_PROT_DOUT_STRIP:
3091 transfer_length = data_bytes;
3092 if (cmd->prot_sg_cnt)
3093 data_bytes += dif_bytes;
3094 break;
3095 case TARGET_PROT_DIN_STRIP:
3096 case TARGET_PROT_DOUT_INSERT:
3097 case TARGET_PROT_DIN_PASS:
3098 case TARGET_PROT_DOUT_PASS:
3099 transfer_length = data_bytes + dif_bytes;
3100 break;
3101 default:
3102 BUG();
3103 break;
3104 }
3105
3106 if (!qlt_hba_err_chk_enabled(se_cmd))
3107 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
3108 /* HBA error checking enabled */
3109 else if (IS_PI_UNINIT_CAPABLE(ha)) {
3110 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
3111 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
3112 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
3113 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
3114 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
3115 }
3116
3117 switch (se_cmd->prot_op) {
3118 case TARGET_PROT_DIN_INSERT:
3119 case TARGET_PROT_DOUT_INSERT:
3120 fw_prot_opts |= PO_MODE_DIF_INSERT;
3121 break;
3122 case TARGET_PROT_DIN_STRIP:
3123 case TARGET_PROT_DOUT_STRIP:
3124 fw_prot_opts |= PO_MODE_DIF_REMOVE;
3125 break;
3126 case TARGET_PROT_DIN_PASS:
3127 case TARGET_PROT_DOUT_PASS:
3128 fw_prot_opts |= PO_MODE_DIF_PASS;
3129 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
3130 break;
3131 default:/* Normal Request */
3132 fw_prot_opts |= PO_MODE_DIF_PASS;
3133 break;
3134 }
3135
3136 /* ---- PKT ---- */
3137 /* Update entry type to indicate Command Type CRC_2 IOCB */
3138 pkt->entry_type = CTIO_CRC2;
3139 pkt->entry_count = 1;
3140 pkt->vp_index = cmd->vp_idx;
3141
3142 h = qlt_make_handle(qpair);
3143 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
3144 /*
3145 * CTIO type 7 from the firmware doesn't provide a way to
3146 * know the initiator's LOOP ID, hence we can't find
3147 * the session and, therefore, the command.
3148 */
3149 return -EAGAIN;
3150 } else
3151 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3152
3153 pkt->handle = make_handle(qpair->req->id, h);
3154 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3155 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3156 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3157 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3158 pkt->exchange_addr = atio->u.isp24.exchange_addr;
3159
3160 /* silence compile warning */
3161 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3162 pkt->ox_id = cpu_to_le16(t16);
3163
3164 t16 = (atio->u.isp24.attr << 9);
3165 pkt->flags |= cpu_to_le16(t16);
3166 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3167
3168 /* Set transfer direction */
3169 if (cmd->dma_data_direction == DMA_TO_DEVICE)
3170 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
3171 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3172 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
3173
3174 pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
3175 /* Fibre channel byte count */
3176 pkt->transfer_length = cpu_to_le32(transfer_length);
3177
3178 /* ----- CRC context -------- */
3179
3180 /* Allocate CRC context from global pool */
3181 crc_ctx_pkt = cmd->ctx =
3182 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
3183
3184 if (!crc_ctx_pkt)
3185 goto crc_queuing_error;
3186
3187 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
3188 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
3189
3190 /* Set handle */
3191 crc_ctx_pkt->handle = pkt->handle;
3192
3193 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3194
3195 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
3196 pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
3197
3198 if (!bundling) {
3199 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3200 } else {
3201 /*
3202 * Configure bundling if we need to fetch interleaving
3203 * protection PCI accesses
3204 */
3205 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
3206 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3207 crc_ctx_pkt->u.bundling.dseg_count =
3208 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3209 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3210 }
3211
3212 /* Finish the common fields of CRC pkt */
3213 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
3214 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
3215 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
3216 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
3217
3218 memset(&tc, 0, sizeof(tc));
3219 tc.vha = vha;
3220 tc.blk_sz = cmd->blk_sz;
3221 tc.bufflen = cmd->bufflen;
3222 tc.sg = cmd->sg;
3223 tc.prot_sg = cmd->prot_sg;
3224 tc.ctx = crc_ctx_pkt;
3225 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3226
3227 /* Walks data segments */
3228 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
3229
3230 if (!bundling && prm->prot_seg_cnt) {
3231 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
3232 prm->tot_dsds, &tc))
3233 goto crc_queuing_error;
3234 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
3235 (prm->tot_dsds - prm->prot_seg_cnt), &tc))
3236 goto crc_queuing_error;
3237
3238 if (bundling && prm->prot_seg_cnt) {
3239 /* Walks dif segments */
3240 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3241
3242 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3243 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3244 prm->prot_seg_cnt, cmd))
3245 goto crc_queuing_error;
3246 }
3247 return QLA_SUCCESS;
3248
3249 crc_queuing_error:
3250 /* Cleanup will be performed by the caller */
3251 qpair->req->outstanding_cmds[h] = NULL;
3252
3253 return QLA_FUNCTION_FAILED;
3254 }
3255
3256 /*
3257 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
3258 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3259 */
3260 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3261 uint8_t scsi_status)
3262 {
3263 struct scsi_qla_host *vha = cmd->vha;
3264 struct qla_qpair *qpair = cmd->qpair;
3265 struct ctio7_to_24xx *pkt;
3266 struct qla_tgt_prm prm;
3267 uint32_t full_req_cnt = 0;
3268 unsigned long flags = 0;
3269 int res;
3270
3271 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3272 (cmd->sess && cmd->sess->deleted)) {
3273 cmd->state = QLA_TGT_STATE_PROCESSED;
3274 return 0;
3275 }
3276
3277 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3278 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3279 (xmit_type & QLA_TGT_XMIT_STATUS) ?
3280 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3281 &cmd->se_cmd, qpair->id);
3282
3283 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3284 &full_req_cnt);
3285 if (unlikely(res != 0)) {
3286 return res;
3287 }
3288
3289 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3290
3291 if (xmit_type == QLA_TGT_XMIT_STATUS)
3292 qpair->tgt_counters.core_qla_snd_status++;
3293 else
3294 qpair->tgt_counters.core_qla_que_buf++;
3295
3296 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3297 /*
3298 * Either the port is not online or this request was from
3299 * previous life, just abort the processing.
3300 */
3301 cmd->state = QLA_TGT_STATE_PROCESSED;
3302 ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3303 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3304 vha->flags.online, qla2x00_reset_active(vha),
3305 cmd->reset_count, qpair->chip_reset);
3306 res = 0;
3307 goto out_unmap_unlock;
3308 }
3309
3310 /* Does the F/W have enough IOCBs for this request? */
3311 res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3312 if (unlikely(res))
3313 goto out_unmap_unlock;
3314
3315 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3316 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3317 else
3318 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3319 if (unlikely(res != 0)) {
3320 qpair->req->cnt += full_req_cnt;
3321 goto out_unmap_unlock;
3322 }
3323
3324 pkt = (struct ctio7_to_24xx *)prm.pkt;
3325
3326 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3327 pkt->u.status0.flags |=
3328 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3329 CTIO7_FLAGS_STATUS_MODE_0);
3330
3331 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3332 qlt_load_data_segments(&prm);
3333
3334 if (prm.add_status_pkt == 0) {
3335 if (xmit_type & QLA_TGT_XMIT_STATUS) {
3336 pkt->u.status0.scsi_status =
3337 cpu_to_le16(prm.rq_result);
3338 if (!cmd->edif)
3339 pkt->u.status0.residual =
3340 cpu_to_le32(prm.residual);
3341
3342 pkt->u.status0.flags |= cpu_to_le16(
3343 CTIO7_FLAGS_SEND_STATUS);
3344 if (qlt_need_explicit_conf(cmd, 0)) {
3345 pkt->u.status0.flags |=
3346 cpu_to_le16(
3347 CTIO7_FLAGS_EXPLICIT_CONFORM |
3348 CTIO7_FLAGS_CONFORM_REQ);
3349 }
3350 }
3351
3352 } else {
3353 /*
3354 * We have already made sure that there is a sufficient
3355 * number of request entries so we don't drop the HW lock in
3356 * req_pkt().
3357 */
3358 struct ctio7_to_24xx *ctio =
3359 (struct ctio7_to_24xx *)qlt_get_req_pkt(
3360 qpair->req);
3361
3362 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3363 "Building additional status packet 0x%p.\n",
3364 ctio);
3365
3366 /*
3367 * T10Dif: ctio_crc2_to_fw overlay ontop of
3368 * ctio7_to_24xx
3369 */
3370 memcpy(ctio, pkt, sizeof(*ctio));
3371 /* reset back to CTIO7 */
3372 ctio->entry_count = 1;
3373 ctio->entry_type = CTIO_TYPE7;
3374 ctio->dseg_count = 0;
3375 ctio->u.status1.flags &= ~cpu_to_le16(
3376 CTIO7_FLAGS_DATA_IN);
3377
3378 /* Real finish is ctio_m1's finish */
3379 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3380 pkt->u.status0.flags |= cpu_to_le16(
3381 CTIO7_FLAGS_DONT_RET_CTIO);
3382
3383 /* qlt_24xx_init_ctio_to_isp will correct
3384 * all necessary fields that are part of CTIO7.
3385 * There should be no residual CTIO-CRC2 data.
3386 */
3387 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3388 &prm);
3389 }
3390 } else
3391 qlt_24xx_init_ctio_to_isp(pkt, &prm);
3392
3394 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3395 cmd->cmd_sent_to_fw = 1;
3396 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3397
3398 /* Memory Barrier */
3399 wmb();
3400 if (qpair->reqq_start_iocbs)
3401 qpair->reqq_start_iocbs(qpair);
3402 else
3403 qla2x00_start_iocbs(vha, qpair->req);
3404 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3405
3406 return 0;
3407
3408 out_unmap_unlock:
3409 qlt_unmap_sg(vha, cmd);
3410 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3411
3412 return res;
3413 }
3414 EXPORT_SYMBOL(qlt_xmit_response);
3415
3416 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3417 {
3418 struct ctio7_to_24xx *pkt;
3419 struct scsi_qla_host *vha = cmd->vha;
3420 struct qla_tgt *tgt = cmd->tgt;
3421 struct qla_tgt_prm prm;
3422 unsigned long flags = 0;
3423 int res = 0;
3424 struct qla_qpair *qpair = cmd->qpair;
3425
3426 memset(&prm, 0, sizeof(prm));
3427 prm.cmd = cmd;
3428 prm.tgt = tgt;
3429 prm.sg = NULL;
3430 prm.req_cnt = 1;
3431
3432 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3433 (cmd->sess && cmd->sess->deleted)) {
3434 /*
3435 * Either the port is not online or this request was from
3436 * previous life, just abort the processing.
3437 */
3438 cmd->aborted = 1;
3439 cmd->write_data_transferred = 0;
3440 cmd->state = QLA_TGT_STATE_DATA_IN;
3441 vha->hw->tgt.tgt_ops->handle_data(cmd);
3442 ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
3443 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
3444 vha->flags.online, qla2x00_reset_active(vha),
3445 cmd->reset_count, qpair->chip_reset);
3446 return 0;
3447 }
3448
3449 /* Calculate number of entries and segments required */
3450 if (qlt_pci_map_calc_cnt(&prm) != 0)
3451 return -EAGAIN;
3452
3453 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3454 /* Does the F/W have enough IOCBs for this request? */
3455 res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
3456 if (res != 0)
3457 goto out_unlock_free_unmap;
3458 if (cmd->se_cmd.prot_op)
3459 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3460 else
3461 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3462
3463 if (unlikely(res != 0)) {
3464 qpair->req->cnt += prm.req_cnt;
3465 goto out_unlock_free_unmap;
3466 }
3467
3468 pkt = (struct ctio7_to_24xx *)prm.pkt;
3469 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3470 CTIO7_FLAGS_STATUS_MODE_0);
3471
3472 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3473 qlt_load_data_segments(&prm);
3474
3475 cmd->state = QLA_TGT_STATE_NEED_DATA;
3476 cmd->cmd_sent_to_fw = 1;
3477 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3478
3479 /* Memory Barrier */
3480 wmb();
3481 if (qpair->reqq_start_iocbs)
3482 qpair->reqq_start_iocbs(qpair);
3483 else
3484 qla2x00_start_iocbs(vha, qpair->req);
3485 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3486
3487 return res;
3488
3489 out_unlock_free_unmap:
3490 qlt_unmap_sg(vha, cmd);
3491 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3492
3493 return res;
3494 }
3495 EXPORT_SYMBOL(qlt_rdy_to_xfer);
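/*
 * Illustrative sketch only (not part of this file): a fabric module such
 * as tcm_qla2xxx typically calls qlt_rdy_to_xfer() from its
 * ->write_pending() callback and qlt_xmit_response() from
 * ->queue_data_in()/->queue_status(), roughly:
 *
 *	static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
 *	{
 *		struct qla_tgt_cmd *cmd = container_of(se_cmd,
 *				struct qla_tgt_cmd, se_cmd);
 *
 *		return qlt_rdy_to_xfer(cmd);
 *	}
 */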
3496
3498 /*
3499 * it is assumed either hardware_lock or qpair lock is held.
3500 */
3501 static void
3502 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3503 struct ctio_crc_from_fw *sts)
3504 {
3505 uint8_t *ap = &sts->actual_dif[0];
3506 uint8_t *ep = &sts->expected_dif[0];
3507 uint64_t lba = cmd->se_cmd.t_task_lba;
3508 uint8_t scsi_status, sense_key, asc, ascq;
3509 unsigned long flags;
3510 struct scsi_qla_host *vha = cmd->vha;
3511
3512 cmd->trc_flags |= TRC_DIF_ERR;
3513
3514 cmd->a_guard = get_unaligned_be16(ap + 0);
3515 cmd->a_app_tag = get_unaligned_be16(ap + 2);
3516 cmd->a_ref_tag = get_unaligned_be32(ap + 4);
3517
3518 cmd->e_guard = get_unaligned_be16(ep + 0);
3519 cmd->e_app_tag = get_unaligned_be16(ep + 2);
3520 cmd->e_ref_tag = get_unaligned_be32(ep + 4);
3521
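/*
 * On a mismatch, report CHECK CONDITION with ABORTED COMMAND sense and
 * ASC 0x10; the ASCQ encodes which tag failed (1 = guard, 2 = app tag,
 * 3 = ref tag).
 */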
3522 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3523 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3524
3525 scsi_status = sense_key = asc = ascq = 0;
	/* check appl tag */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x2;
	}

	/* check ref tag */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/*
		 * Assume the SCSI status gets out on the wire; do not
		 * wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}

/*
 * If hardware_lock is held on entry, it might be dropped and then reacquired.
 * This function sends a Notify Ack with the TERMINATE flag to the ISP.
 */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
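	/* Skip handle: the completion path will ignore this IOCB. */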
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}

static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked)
{
	int rc;

	WARN_ON_ONCE(!ha_locked);
	rc = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", rc);
}

/*
 * If hardware_lock is held on entry, it might be dropped and then reacquired.
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx.
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	if (cmd)
		vha = cmd->vha;

	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	qpair->tgt_counters.num_term_xchg_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.ox_id = cpu_to_le16(temp);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return ret;
}

static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* Why use a different vha? NPIV: the cmd may belong to a vport. */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}

static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/*
			 * This cmd was never sent to TCM. There is no need
			 * to schedule free or call free_cmd.
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}

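/*
 * If dropped q-full commands exceed the leak threshold (a percentage of
 * the firmware exchange count), request a chip reset to reclaim the
 * leaked exchanges.
 */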
static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
{
	uint32_t total_leaked;

	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;

	if (vha->hw->tgt.leak_exchg_thresh_hold &&
	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {

		ql_dbg(ql_dbg_tgt, vha, 0xe079,
		    "Chip reset due to exchange starvation: %d/%d.\n",
		    total_leaked, vha->hw->cur_fw_xcb_count);

		if (IS_P3P_TYPE(vha->hw))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);

		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 * 1) XFER Rdy completion + CMD_T_ABORT
		 * 2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return -EIO;
	}
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);

void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
	struct fc_port *sess = cmd->sess;

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
	    "%s: se_cmd[%p] ox_id %04x\n",
	    __func__, &cmd->se_cmd,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	BUG_ON(cmd->cmd_in_wq);

	if (!cmd->q_full)
		qlt_decr_num_pend_cmds(cmd->vha);

	BUG_ON(cmd->sg_mapped);
	cmd->jiffies_at_free = get_jiffies_64();

	if (!sess || !sess->se_sess) {
		WARN_ON(1);
		return;
	}
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
	struct qla_tgt_cmd *cmd, uint32_t status)
{
	int term = 0;
	struct scsi_qla_host *vha = qpair->vha;

	if (cmd->se_cmd.prot_op)
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
		    "se_cmd=%p tag[%x] op %#x/%s",
		    cmd->lba, cmd->lba,
		    cmd->num_blks, &cmd->se_cmd,
		    cmd->atio.u.isp24.exchange_addr,
		    cmd->se_cmd.prot_op,
		    prot_op_str(cmd->se_cmd.prot_op));

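	/*
	 * Only send a TERMINATE EXCHANGE if the firmware has not already
	 * terminated it (OF_TERM_EXCH set in the returned CTIO flags).
	 */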
	if (ctio != NULL) {
		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;

		term = !(c->flags &
		    cpu_to_le16(OF_TERM_EXCH));
	} else
		term = 1;

	if (term)
		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);

	return term;
}

/* ha->hardware_lock supposed to be held on entry */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

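	/*
	 * The completion handle packs both the originating request-queue
	 * ID (qid) and an index into req->outstanding_cmds[] (h); both
	 * are validated below before use.
	 */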
	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
	    cmd->sess) {
		qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
		    (struct ctio7_from_24xx *)ctio);
	}

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver requested abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}

		case CTIO_FAST_AUTH_ERR:
		case CTIO_FAST_INCOMP_PAD_LEN:
		case CTIO_FAST_INVALID_REQ:
		case CTIO_FAST_SPI_ERR:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		/*
		 * "cmd->aborted" means the cmd is already aborted/terminated
		 * and we don't need to terminate again. The exchange is
		 * already cleaned up/freed at the FW level; just clean up at
		 * the driver level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
	    !cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}

static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = TCM_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = TCM_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = TCM_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = TCM_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

/*
 * Process context for I/O path into tcm_qla2xxx code
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * The cmd has not been sent to the target yet, so pass NULL as the
	 * second argument to qlt_send_term_exchange() and free the memory
	 * here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	unsigned long flags;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	__qlt_do_work(cmd);
}

void qlt_clr_qp_table(struct scsi_qla_host *vha)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	void *node;
	u64 key = 0;

	ql_log(ql_log_info, vha, 0x706c,
	    "User update Number of Active Qpairs %d\n",
	    ha->tgt.num_act_qpairs);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	ha->base_qpair->lun_cnt = 0;
	for (key = 0; key < ha->max_qpairs; key++)
		if (ha->queue_pair_map[key])
			ha->queue_pair_map[key]->lun_cnt = 0;

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
}

static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;
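
	/*
	 * LUN-to-qpair affinity: the first command for a LUN picks the
	 * queue pair with the lowest lun_cnt and caches the choice in
	 * lun_qpair_map, so later commands for that LUN reuse the same
	 * queue pair (and its CPU hint).
	 */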
	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread the lun-to-qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
						cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
						    "Unable to insert lun %llx into lun_qpair_map\n",
						    cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				    "Unable to insert lun %llx into lun_qpair_map\n",
				    cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}

static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct fc_port *sess,
	struct atio_from_isp *atio)
{
	struct qla_tgt_cmd *cmd;

	cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
	if (!cmd)
		return NULL;

	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&cmd->sess_cmd_list);
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;
	cmd->edif = sess->edif.enable;

	return cmd;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/*
	 * Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in the sess_del_work wq.
	 */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x\n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
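
	/*
	 * Pick where the work runs: with qpairs available, use the CPU
	 * bound to the assigned qpair; otherwise, under MSI-X, keep reads
	 * on the receiving CPU and steer other commands to the hint CPU.
	 */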
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

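	/*
	 * LUN reset, clear task set, and abort task set first abort any
	 * queued commands for the LUN; those functions and CLEAR_ACA are
	 * then routed to that LUN's qpair hint, while target- and
	 * nexus-wide functions keep the default hint.
	 */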
	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
		    "%s: se_sess %p / sess %p from"
		    " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
		    " LOGO failed: %#x\n",
		    __func__,
		    fcport->se_sess,
		    fcport,
		    fcport->port_name, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}

/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns the existing session with matching wwn if present,
 * NULL otherwise.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
	port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but
				 * another session that has the same
				 * s_id/loop_id combo might have cleared it
				 * when it requested this session's deletion,
				 * so don't touch it.
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id:
				 * kill the session, but don't free the loop_id.
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
		    (loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/*
			 * Same loop_id but different s_id:
			 * OK to kill and log out.
			 */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}

/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));
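
	/* Flag matching entries on both the unknown-ATIO and active lists. */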
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}

static int qlt_handle_login(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

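	/* port_id[] is in wire order: [0] = al_pa, [1] = area, [2] = domain. */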
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	if (vha->hw->flags.edif_enabled &&
	    !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
	    iocb->u.isp24.status_subcode == ELS_PLOGI &&
	    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_INACTIVE(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		} else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
		    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to unsecured lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		}
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;
	sess->loop_id = loop_id;

	if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
		/* remote port has assigned Port ID */
		if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
			vha->d_id = sess->d_id;

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC - send port online\n",
		    __func__, sess->port_name);

		qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
		    sess->d_id.b24);
	}

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;

	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;

	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release the nport
		 * handle during the LOGO process to avoid nport handle leaks
		 * inside FW. The exception is when LOGO is done while another
		 * PLOGI with the same nport handle is waiting, as might be
		 * the case here. Note: there is always a possibility of a
		 * race where session deletion has already started for other
		 * reasons (e.g. ACL removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after the nport handle has been
		 *    freed, FW must have assigned this PLOGI a new/same
		 *    handle and we can proceed ACK'ing it as usual when
		 *    session deletion completes.
		 * 2. if PLOGI arrived in FW before the LOGO with the
		 *    LCF_FREE_NPORT bit reached it, the handle has now been
		 *    released. We'll get an error when we ACK this PLOGI.
		 *    Nothing will be sent back to the initiator. The
		 *    initiator should eventually retry PLOGI and the
		 *    situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);

		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/*
	 * res = 1 means ack at the end of thread;
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * An impatient initiator sent PRLI before the
				 * last PLOGI could finish. Force it to retry
				 * while the last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went to the upper layer; look for
			 * qlt_xmit_tm_rsp() for LOGO_ACK & sess delete.
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to the upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else the logo will be acked */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}
5254
5255 /*
5256 * ha->hardware_lock supposed to be held on entry.
5257 * Might drop it, then reacquire.
5258 */
qlt_handle_imm_notify(struct scsi_qla_host * vha,struct imm_ntfy_from_isp * iocb)5259 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5260 struct imm_ntfy_from_isp *iocb)
5261 {
5262 struct qla_hw_data *ha = vha->hw;
5263 uint32_t add_flags = 0;
5264 int send_notify_ack = 1;
5265 uint16_t status;
5266
5267 lockdep_assert_held(&ha->hardware_lock);
5268
5269 status = le16_to_cpu(iocb->u.isp2x.status);
5270 switch (status) {
5271 case IMM_NTFY_LIP_RESET:
5272 {
5273 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5274 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5275 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5276 iocb->u.isp24.status_subcode);
5277
5278 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5279 send_notify_ack = 0;
5280 break;
5281 }
5282
5283 case IMM_NTFY_LIP_LINK_REINIT:
5284 {
5285 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5286
5287 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5288 "qla_target(%d): LINK REINIT (loop %#x, "
5289 "subcode %x)\n", vha->vp_idx,
5290 le16_to_cpu(iocb->u.isp24.nport_handle),
5291 iocb->u.isp24.status_subcode);
5292 if (tgt->link_reinit_iocb_pending) {
5293 qlt_send_notify_ack(ha->base_qpair,
5294 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5295 }
5296 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5297 tgt->link_reinit_iocb_pending = 1;
5298 /*
5299 * QLogic requires to wait after LINK REINIT for possible
5300 * PDISC or ADISC ELS commands
5301 */
5302 send_notify_ack = 0;
5303 break;
5304 }
5305
5306 case IMM_NTFY_PORT_LOGOUT:
5307 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5308 "qla_target(%d): Port logout (loop "
5309 "%#x, subcode %x)\n", vha->vp_idx,
5310 le16_to_cpu(iocb->u.isp24.nport_handle),
5311 iocb->u.isp24.status_subcode);
5312
5313 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5314 send_notify_ack = 0;
5315 /* The sessions will be cleared in the callback, if needed */
5316 break;
5317
5318 case IMM_NTFY_GLBL_TPRLO:
5319 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5320 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5321 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5322 send_notify_ack = 0;
5323 /* The sessions will be cleared in the callback, if needed */
5324 break;
5325
5326 case IMM_NTFY_PORT_CONFIG:
5327 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5328 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5329 status);
5330 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5331 send_notify_ack = 0;
5332 /* The sessions will be cleared in the callback, if needed */
5333 break;
5334
5335 case IMM_NTFY_GLBL_LOGO:
5336 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5337 "qla_target(%d): Link failure detected\n",
5338 vha->vp_idx);
5339 /* I_T nexus loss */
5340 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5341 send_notify_ack = 0;
5342 break;
5343
5344 case IMM_NTFY_IOCB_OVERFLOW:
5345 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5346 "qla_target(%d): Cannot provide requested "
5347 "capability (IOCB overflowed the immediate notify "
5348 "resource count)\n", vha->vp_idx);
5349 break;
5350
5351 case IMM_NTFY_ABORT_TASK:
5352 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5353 "qla_target(%d): Abort Task (S %08x I %#x -> "
5354 "L %#x)\n", vha->vp_idx,
5355 le16_to_cpu(iocb->u.isp2x.seq_id),
5356 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5357 le16_to_cpu(iocb->u.isp2x.lun));
5358 if (qlt_abort_task(vha, iocb) == 0)
5359 send_notify_ack = 0;
5360 break;
5361
5362 case IMM_NTFY_RESOURCE:
5363 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5364 "qla_target(%d): Out of resources, host %ld\n",
5365 vha->vp_idx, vha->host_no);
5366 break;
5367
5368 case IMM_NTFY_MSG_RX:
5369 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5370 "qla_target(%d): Immediate notify task %x\n",
5371 vha->vp_idx, iocb->u.isp2x.task_flags);
5372 break;
5373
5374 case IMM_NTFY_ELS:
5375 if (qlt_24xx_handle_els(vha, iocb) == 0)
5376 send_notify_ack = 0;
5377 break;
5378 default:
5379 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5380 "qla_target(%d): Received unknown immediate "
5381 "notify status %x\n", vha->vp_idx, status);
5382 break;
5383 }
5384
5385 if (send_notify_ack)
5386 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5387 0, 0);
5388 }
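
/*
 * Editor's note: qlt_handle_imm_notify() above follows an "ACK unless a
 * sub-handler took ownership" pattern: every case that can complete
 * asynchronously clears send_notify_ack, so exactly one NOTIFY_ACK goes
 * out, either here or from the sub-handler's completion path. A minimal
 * standalone sketch of that pattern follows; it is illustrative only,
 * and every name in it is hypothetical, not driver code.
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdio.h>

static int handle_reset(int event)
{
	(void)event;
	return 0;	/* took ownership: will ack on completion */
}

static int handle_logout(int event)
{
	(void)event;
	return -1;	/* could not take ownership: caller must ack */
}

static void dispatch(int event)
{
	int send_ack = 1;	/* default: ack synchronously */

	switch (event) {
	case 1:
		if (handle_reset(event) == 0)
			send_ack = 0;
		break;
	case 2:
		if (handle_logout(event) == 0)
			send_ack = 0;
		break;
	default:
		break;		/* unknown events are acked as-is */
	}

	if (send_ack)
		printf("ack event %d\n", event);
}

int main(void)
{
	dispatch(1);	/* no ack printed: the handler owns it */
	dispatch(2);	/* ack printed: the handler declined */
	return 0;
}
#endif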
5389
5390 /*
5391 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
5392 * This function sends busy to ISP 2xxx or 24xx.
5393 */
5394 static int __qlt_send_busy(struct qla_qpair *qpair,
5395 struct atio_from_isp *atio, uint16_t status)
5396 {
5397 struct scsi_qla_host *vha = qpair->vha;
5398 struct ctio7_to_24xx *ctio24;
5399 struct qla_hw_data *ha = vha->hw;
5400 request_t *pkt;
5401 struct fc_port *sess = NULL;
5402 unsigned long flags;
5403 u16 temp;
5404 port_id_t id;
5405
5406 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
5407
5408 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5409 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5410 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5411 if (!sess) {
5412 qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5413 return 0;
5414 }
5415 /* Sending marker isn't necessary, since we're called from ISR */
5416
5417 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5418 if (!pkt) {
5419 ql_dbg(ql_dbg_io, vha, 0x3063,
5420 "qla_target(%d): %s failed: unable to allocate "
5421 "request packet", vha->vp_idx, __func__);
5422 return -ENOMEM;
5423 }
5424
5425 qpair->tgt_counters.num_q_full_sent++;
5426 pkt->entry_count = 1;
5427 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5428
5429 ctio24 = (struct ctio7_to_24xx *)pkt;
5430 ctio24->entry_type = CTIO_TYPE7;
5431 ctio24->nport_handle = cpu_to_le16(sess->loop_id);
5432 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5433 ctio24->vp_index = vha->vp_idx;
5434 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
5435 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5436 temp = (atio->u.isp24.attr << 9) |
5437 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5438 CTIO7_FLAGS_DONT_RET_CTIO;
5439 ctio24->u.status1.flags = cpu_to_le16(temp);
5440 /*
5441 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5442 * if explicit confirmation is used.
5443 */
5444 ctio24->u.status1.ox_id =
5445 cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
5446 ctio24->u.status1.scsi_status = cpu_to_le16(status);
5447
5448 ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
5449
5450 if (ctio24->u.status1.residual != 0)
5451 ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
5452
5453 /* Memory Barrier */
5454 wmb();
5455 if (qpair->reqq_start_iocbs)
5456 qpair->reqq_start_iocbs(qpair);
5457 else
5458 qla2x00_start_iocbs(vha, qpair->req);
5459 return 0;
5460 }
5461
5462 /*
5463 * This routine is used to allocate a command for either a QFull condition
5464 * (i.e., reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5465 * out previously.
5466 */
5467 static void
5468 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5469 struct atio_from_isp *atio, uint16_t status, int qfull)
5470 {
5471 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5472 struct qla_hw_data *ha = vha->hw;
5473 struct fc_port *sess;
5474 struct qla_tgt_cmd *cmd;
5475 unsigned long flags;
5476
5477 if (unlikely(tgt->tgt_stop)) {
5478 ql_dbg(ql_dbg_io, vha, 0x300a,
5479 "New command while device %p is shutting down\n", tgt);
5480 return;
5481 }
5482
5483 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5484 vha->hw->tgt.num_qfull_cmds_dropped++;
5485 if (vha->hw->tgt.num_qfull_cmds_dropped >
5486 vha->qla_stats.stat_max_qfull_cmds_dropped)
5487 vha->qla_stats.stat_max_qfull_cmds_dropped =
5488 vha->hw->tgt.num_qfull_cmds_dropped;
5489
5490 ql_dbg(ql_dbg_io, vha, 0x3068,
5491 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5492 vha->vp_idx, __func__,
5493 vha->hw->tgt.num_qfull_cmds_dropped);
5494
5495 qlt_chk_exch_leak_thresh_hold(vha);
5496 return;
5497 }
5498
5499 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5500 (vha, atio->u.isp24.fcp_hdr.s_id);
5501 if (!sess)
5502 return;
5503
5504 cmd = ha->tgt.tgt_ops->get_cmd(sess);
5505 if (!cmd) {
5506 ql_dbg(ql_dbg_io, vha, 0x3009,
5507 "qla_target(%d): %s: Allocation of cmd failed\n",
5508 vha->vp_idx, __func__);
5509
5510 vha->hw->tgt.num_qfull_cmds_dropped++;
5511 if (vha->hw->tgt.num_qfull_cmds_dropped >
5512 vha->qla_stats.stat_max_qfull_cmds_dropped)
5513 vha->qla_stats.stat_max_qfull_cmds_dropped =
5514 vha->hw->tgt.num_qfull_cmds_dropped;
5515
5516 qlt_chk_exch_leak_thresh_hold(vha);
5517 return;
5518 }
5519
5520 qlt_incr_num_pend_cmds(vha);
5521 INIT_LIST_HEAD(&cmd->cmd_list);
5522 memcpy(&cmd->atio, atio, sizeof(*atio));
5523
5524 cmd->tgt = vha->vha_tgt.qla_tgt;
5525 cmd->vha = vha;
5526 cmd->reset_count = ha->base_qpair->chip_reset;
5527 cmd->q_full = 1;
5528 cmd->qpair = ha->base_qpair;
5529
5530 if (qfull) {
5531 cmd->q_full = 1;
5532 /* NOTE: borrowing the state field to carry the status */
5533 cmd->state = status;
5534 } else
5535 cmd->term_exchg = 1;
5536
5537 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5538 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5539
5540 vha->hw->tgt.num_qfull_cmds_alloc++;
5541 if (vha->hw->tgt.num_qfull_cmds_alloc >
5542 vha->qla_stats.stat_max_qfull_cmds_alloc)
5543 vha->qla_stats.stat_max_qfull_cmds_alloc =
5544 vha->hw->tgt.num_qfull_cmds_alloc;
5545 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5546 }
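
/*
 * Editor's note: qlt_alloc_qfull_cmd() above implements a bounded
 * overflow queue: allocations are capped at MAX_QFULL_CMDS_ALLOC, drops
 * are counted, and high-water marks are kept for both so the condition
 * stays visible in the statistics. A minimal standalone sketch of that
 * idiom, with hypothetical names throughout:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdio.h>

#define MAX_OVERFLOW_CMDS 8

struct overflow_stats {
	unsigned int alloc;		/* currently queued */
	unsigned int max_alloc;		/* high-water mark of queued */
	unsigned int dropped;		/* total drops */
	unsigned int max_dropped;	/* high-water mark of drops */
};

/* Returns 0 if the command may be queued, -1 if it must be dropped. */
static int overflow_admit(struct overflow_stats *st)
{
	if (st->alloc + 1 > MAX_OVERFLOW_CMDS) {
		st->dropped++;
		if (st->dropped > st->max_dropped)
			st->max_dropped = st->dropped;
		return -1;
	}
	st->alloc++;
	if (st->alloc > st->max_alloc)
		st->max_alloc = st->alloc;
	return 0;
}

int main(void)
{
	struct overflow_stats st = { 0, 0, 0, 0 };
	int i;

	for (i = 0; i < 12; i++)
		printf("cmd %d: %s\n", i,
		       overflow_admit(&st) ? "dropped" : "queued");
	printf("max_alloc=%u dropped=%u\n", st.max_alloc, st.dropped);
	return 0;
}
#endif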
5547
5548 int
5549 qlt_free_qfull_cmds(struct qla_qpair *qpair)
5550 {
5551 struct scsi_qla_host *vha = qpair->vha;
5552 struct qla_hw_data *ha = vha->hw;
5553 unsigned long flags;
5554 struct qla_tgt_cmd *cmd, *tcmd;
5555 struct list_head free_list, q_full_list;
5556 int rc = 0;
5557
5558 if (list_empty(&ha->tgt.q_full_list))
5559 return 0;
5560
5561 INIT_LIST_HEAD(&free_list);
5562 INIT_LIST_HEAD(&q_full_list);
5563
5564 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5565 if (list_empty(&ha->tgt.q_full_list)) {
5566 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5567 return 0;
5568 }
5569
5570 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5571 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5572
5573 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
5574 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
5575 if (cmd->q_full)
5576 /* cmd->state is a borrowed field to hold status */
5577 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
5578 else if (cmd->term_exchg)
5579 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
5580
5581 if (rc == -ENOMEM)
5582 break;
5583
5584 if (cmd->q_full)
5585 ql_dbg(ql_dbg_io, vha, 0x3006,
5586 "%s: busy sent for ox_id[%04x]\n", __func__,
5587 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5588 else if (cmd->term_exchg)
5589 ql_dbg(ql_dbg_io, vha, 0x3007,
5590 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5591 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5592 else
5593 ql_dbg(ql_dbg_io, vha, 0x3008,
5594 "%s: Unexpected cmd in QFull list %p\n", __func__,
5595 cmd);
5596
5597 list_move_tail(&cmd->cmd_list, &free_list);
5598
5599 /* piggy back on hardware_lock for protection */
5600 vha->hw->tgt.num_qfull_cmds_alloc--;
5601 }
5602 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5603
5604 cmd = NULL;
5605
5606 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5607 list_del(&cmd->cmd_list);
5608 /* This cmd was never sent to TCM. There is no need
5609 * to schedule free or call free_cmd
5610 */
5611 qlt_free_cmd(cmd);
5612 }
5613
5614 if (!list_empty(&q_full_list)) {
5615 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5616 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5617 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5618 }
5619
5620 return rc;
5621 }
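
/*
 * Editor's note: qlt_free_qfull_cmds() above uses the classic "splice
 * under the lock, process off-lock" drain: the shared list is moved
 * wholesale onto a private list head while the spinlock is held, the
 * entries are processed without the lock, and any unprocessed remainder
 * is spliced back at the end. A standalone sketch of the
 * take-and-process half, using a pthread mutex in place of a spinlock;
 * all names are hypothetical:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <pthread.h>
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_list;	/* protected by list_lock */

static void drain(void)
{
	struct node *local, *n;

	pthread_mutex_lock(&list_lock);
	local = shared_list;		/* splice: take the whole list */
	shared_list = NULL;
	pthread_mutex_unlock(&list_lock);

	for (n = local; n; n = n->next)	/* process without the lock */
		printf("processing %d\n", n->val);
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, &a };

	shared_list = &b;
	drain();
	return 0;
}
#endif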
5622
5623 static void
5624 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5625 uint16_t status)
5626 {
5627 int rc = 0;
5628 struct scsi_qla_host *vha = qpair->vha;
5629
5630 rc = __qlt_send_busy(qpair, atio, status);
5631 if (rc == -ENOMEM)
5632 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5633 }
5634
5635 static int
5636 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5637 struct atio_from_isp *atio, uint8_t ha_locked)
5638 {
5639 struct qla_hw_data *ha = vha->hw;
5640 unsigned long flags;
5641
5642 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5643 return 0;
5644
5645 if (!ha_locked)
5646 spin_lock_irqsave(&ha->hardware_lock, flags);
5647 qlt_send_busy(qpair, atio, qla_sam_status);
5648 if (!ha_locked)
5649 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5650
5651 return 1;
5652 }
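
/*
 * Editor's note: the ha_locked flag used above lets one helper serve
 * both IRQ-context callers, which already hold the lock, and
 * process-context callers, for which the helper must take it. A
 * standalone sketch of the convention (pthread mutex, hypothetical
 * names); note the idiom is only as safe as the honesty of every
 * caller's flag:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

static void send_busy(int tag, int already_locked)
{
	if (!already_locked)
		pthread_mutex_lock(&hw_lock);

	printf("busy sent for tag %d\n", tag);	/* lock held either way */

	if (!already_locked)
		pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
	send_busy(1, 0);		/* process context: helper locks */

	pthread_mutex_lock(&hw_lock);	/* "IRQ" context: caller locks */
	send_busy(2, 1);
	pthread_mutex_unlock(&hw_lock);
	return 0;
}
#endif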
5653
5654 /* ha->hardware_lock supposed to be held on entry */
5655 /* called via callback from qla2xxx */
5656 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5657 struct atio_from_isp *atio, uint8_t ha_locked)
5658 {
5659 struct qla_hw_data *ha = vha->hw;
5660 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5661 int rc;
5662 unsigned long flags = 0;
5663
5664 if (unlikely(tgt == NULL)) {
5665 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5666 "ATIO pkt, but no tgt (ha %p)", ha);
5667 return;
5668 }
5669 /*
5670 * In tgt_stop mode we should also allow all requests to pass.
5671 * Otherwise, some commands can get stuck.
5672 */
5673
5674 tgt->atio_irq_cmd_count++;
5675
5676 switch (atio->u.raw.entry_type) {
5677 case ATIO_TYPE7:
5678 if (unlikely(atio->u.isp24.exchange_addr ==
5679 cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
5680 ql_dbg(ql_dbg_io, vha, 0x3065,
5681 "qla_target(%d): ATIO_TYPE7 "
5682 "received with UNKNOWN exchange address, "
5683 "sending QUEUE_FULL\n", vha->vp_idx);
5684 if (!ha_locked)
5685 spin_lock_irqsave(&ha->hardware_lock, flags);
5686 qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
5687 if (!ha_locked)
5688 spin_unlock_irqrestore(&ha->hardware_lock,
5689 flags);
5690 break;
5691 }
5692
5693 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5694 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5695 atio, ha_locked);
5696 if (rc != 0) {
5697 tgt->atio_irq_cmd_count--;
5698 return;
5699 }
5700 rc = qlt_handle_cmd_for_atio(vha, atio);
5701 } else {
5702 rc = qlt_handle_task_mgmt(vha, atio);
5703 }
5704 if (unlikely(rc != 0)) {
5705 if (!ha_locked)
5706 spin_lock_irqsave(&ha->hardware_lock, flags);
5707 switch (rc) {
5708 case -ENODEV:
5709 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5710 "qla_target: Unable to send command to target\n");
5711 break;
5712 case -EBADF:
5713 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5714 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5715 qlt_send_term_exchange(ha->base_qpair, NULL,
5716 atio, 1, 0);
5717 break;
5718 case -EBUSY:
5719 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5720 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5721 vha->vp_idx);
5722 qlt_send_busy(ha->base_qpair, atio,
5723 tc_sam_status);
5724 break;
5725 default:
5726 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5727 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5728 vha->vp_idx);
5729 qlt_send_busy(ha->base_qpair, atio,
5730 qla_sam_status);
5731 break;
5732 }
5733 if (!ha_locked)
5734 spin_unlock_irqrestore(&ha->hardware_lock,
5735 flags);
5736 }
5737 break;
5738
5739 case IMMED_NOTIFY_TYPE:
5740 {
5741 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5742 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5743 "qla_target(%d): Received ATIO packet %x "
5744 "with error status %x\n", vha->vp_idx,
5745 atio->u.raw.entry_type,
5746 atio->u.isp2x.entry_status);
5747 break;
5748 }
5749 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5750
5751 if (!ha_locked)
5752 spin_lock_irqsave(&ha->hardware_lock, flags);
5753 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5754 if (!ha_locked)
5755 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5756 break;
5757 }
5758
5759 default:
5760 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5761 "qla_target(%d): Received unknown ATIO atio "
5762 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5763 break;
5764 }
5765
5766 tgt->atio_irq_cmd_count--;
5767 }
5768
5769 /*
5770 * qpair lock is assumed to be held
5771 * rc = 0 : send terminate & ABTS response
5772 * rc != 0: do not send term & ABTS response
5773 */
5774 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5775 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5776 {
5777 struct qla_hw_data *ha = vha->hw;
5778 int rc = 0;
5779
5780 /*
5781 * Detect unresolved exchange. If the same ABTS is unable
5782 * to terminate an existing command and the same ABTS loops
5783 * between FW & Driver, then force an FW dump. Within 1 jiffy,
5784 * we should see multiple loops.
5785 */
5786 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5787 qpair->retry_term_jiff == jiffies) {
5788 /* found existing exchange */
5789 qpair->retry_term_cnt++;
5790 if (qpair->retry_term_cnt >= 5) {
5791 rc = -EIO;
5792 qpair->retry_term_cnt = 0;
5793 ql_log(ql_log_warn, vha, 0xffff,
5794 "Unable to send ABTS Respond. Dumping firmware.\n");
5795 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5796 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5797
5798 if (qpair == ha->base_qpair)
5799 ha->isp_ops->fw_dump(vha);
5800 else
5801 qla2xxx_dump_fw(vha);
5802
5803 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5804 qla2xxx_wake_dpc(vha);
5805 }
5806 } else if (qpair->retry_term_jiff != jiffies) {
5807 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5808 qpair->retry_term_cnt = 0;
5809 qpair->retry_term_jiff = jiffies;
5810 }
5811
5812 return rc;
5813 }
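
/*
 * Editor's note: qlt_chk_unresolv_exchg() above detects an ABTS
 * ping-ponging between firmware and driver by keying on the pair
 * (exchange address, jiffy): several retries of the same key within a
 * single tick mean the exchange will never resolve, so it escalates to
 * a firmware dump. A standalone sketch of that storm detector, with
 * hypothetical names:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdio.h>

struct storm_state {
	unsigned int key;	/* identity of the retried operation */
	unsigned long stamp;	/* tick when the current run started */
	int count;		/* retries seen within that tick */
};

/* Returns -1 once the same key retries 5 times within one tick. */
static int storm_check(struct storm_state *st, unsigned int key,
		       unsigned long now)
{
	if (st->key == key && st->stamp == now) {
		if (++st->count >= 5) {
			st->count = 0;
			return -1;	/* escalate: stop retrying */
		}
	} else if (st->stamp != now) {
		st->key = key;		/* new tick: restart tracking */
		st->count = 0;
		st->stamp = now;
	}
	return 0;
}

int main(void)
{
	struct storm_state st = { 0, ~0UL, 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("retry %d -> %d\n", i, storm_check(&st, 42, 100));
	return 0;
}
#endif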
5814
5815
5816 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5817 struct rsp_que *rsp, response_t *pkt)
5818 {
5819 struct abts_resp_from_24xx_fw *entry =
5820 (struct abts_resp_from_24xx_fw *)pkt;
5821 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
5822 struct qla_tgt_mgmt_cmd *mcmd;
5823 struct qla_hw_data *ha = vha->hw;
5824
5825 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5826 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
5827 ql_dbg(ql_dbg_async, vha, 0xe064,
5828 "qla_target(%d): ABTS Comp without mcmd\n",
5829 vha->vp_idx);
5830 return;
5831 }
5832
5833 if (mcmd)
5834 vha = mcmd->vha;
5835 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5836
5837 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5838 "ABTS_RESP_24XX: compl_status %x\n",
5839 entry->compl_status);
5840
5841 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
5842 if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
5843 le32_to_cpu(entry->error_subcode2) == 0) {
5844 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5845 ha->tgt.tgt_ops->free_mcmd(mcmd);
5846 return;
5847 }
5848 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5849 pkt, mcmd);
5850 } else {
5851 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5852 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
5853 vha->vp_idx, entry->compl_status,
5854 entry->error_subcode1,
5855 entry->error_subcode2);
5856 ha->tgt.tgt_ops->free_mcmd(mcmd);
5857 }
5858 } else if (mcmd) {
5859 ha->tgt.tgt_ops->free_mcmd(mcmd);
5860 }
5861 }
5862
5863 /* ha->hardware_lock supposed to be held on entry */
5864 /* called via callback from qla2xxx */
5865 static void qlt_response_pkt(struct scsi_qla_host *vha,
5866 struct rsp_que *rsp, response_t *pkt)
5867 {
5868 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5869
5870 if (unlikely(tgt == NULL)) {
5871 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5872 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
5873 vha->vp_idx, pkt->entry_type, vha->hw);
5874 return;
5875 }
5876
5877 /*
5878 * In tgt_stop mode we should also allow all requests to pass.
5879 * Otherwise, some commands can get stuck.
5880 */
5881
5882 switch (pkt->entry_type) {
5883 case CTIO_CRC2:
5884 case CTIO_TYPE7:
5885 {
5886 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5887
5888 qlt_do_ctio_completion(vha, rsp, entry->handle,
5889 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5890 entry);
5891 break;
5892 }
5893
5894 case ACCEPT_TGT_IO_TYPE:
5895 {
5896 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5897 int rc;
5898
5899 if (atio->u.isp2x.status !=
5900 cpu_to_le16(ATIO_CDB_VALID)) {
5901 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5902 "qla_target(%d): ATIO with error "
5903 "status %x received\n", vha->vp_idx,
5904 le16_to_cpu(atio->u.isp2x.status));
5905 break;
5906 }
5907
5908 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5909 if (rc != 0)
5910 return;
5911
5912 rc = qlt_handle_cmd_for_atio(vha, atio);
5913 if (unlikely(rc != 0)) {
5914 switch (rc) {
5915 case -ENODEV:
5916 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5917 "qla_target: Unable to send command to target\n");
5918 break;
5919 case -EBADF:
5920 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5921 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5922 qlt_send_term_exchange(rsp->qpair, NULL,
5923 atio, 1, 0);
5924 break;
5925 case -EBUSY:
5926 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5927 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5928 vha->vp_idx);
5929 qlt_send_busy(rsp->qpair, atio,
5930 tc_sam_status);
5931 break;
5932 default:
5933 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5934 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5935 vha->vp_idx);
5936 qlt_send_busy(rsp->qpair, atio,
5937 qla_sam_status);
5938 break;
5939 }
5940 }
5941 }
5942 break;
5943
5944 case CONTINUE_TGT_IO_TYPE:
5945 {
5946 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5947
5948 qlt_do_ctio_completion(vha, rsp, entry->handle,
5949 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5950 entry);
5951 break;
5952 }
5953
5954 case CTIO_A64_TYPE:
5955 {
5956 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5957
5958 qlt_do_ctio_completion(vha, rsp, entry->handle,
5959 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5960 entry);
5961 break;
5962 }
5963
5964 case IMMED_NOTIFY_TYPE:
5965 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5966 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5967 break;
5968
5969 case NOTIFY_ACK_TYPE:
5970 if (tgt->notify_ack_expected > 0) {
5971 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5972
5973 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5974 "NOTIFY_ACK seq %08x status %x\n",
5975 le16_to_cpu(entry->u.isp2x.seq_id),
5976 le16_to_cpu(entry->u.isp2x.status));
5977 tgt->notify_ack_expected--;
5978 if (entry->u.isp2x.status !=
5979 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5980 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5981 "qla_target(%d): NOTIFY_ACK "
5982 "failed %x\n", vha->vp_idx,
5983 le16_to_cpu(entry->u.isp2x.status));
5984 }
5985 } else {
5986 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5987 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5988 vha->vp_idx);
5989 }
5990 break;
5991
5992 case ABTS_RECV_24XX:
5993 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5994 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5995 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5996 break;
5997
5998 case ABTS_RESP_24XX:
5999 if (tgt->abts_resp_expected > 0) {
6000 qlt_handle_abts_completion(vha, rsp, pkt);
6001 } else {
6002 ql_dbg(ql_dbg_tgt, vha, 0xe064,
6003 "qla_target(%d): Unexpected ABTS_RESP_24XX "
6004 "received\n", vha->vp_idx);
6005 }
6006 break;
6007
6008 default:
6009 ql_dbg(ql_dbg_tgt, vha, 0xe065,
6010 "qla_target(%d): Received unknown response pkt "
6011 "type %x\n", vha->vp_idx, pkt->entry_type);
6012 break;
6013 }
6014
6015 }
6016
6017 /*
6018 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
6019 */
6020 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
6021 uint16_t *mailbox)
6022 {
6023 struct qla_hw_data *ha = vha->hw;
6024 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6025 int login_code;
6026
6027 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
6028 return;
6029
6030 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
6031 IS_QLA2100(ha))
6032 return;
6033 /*
6034 * In tgt_stop mode we should also allow all requests to pass.
6035 * Otherwise, some commands can get stuck.
6036 */
6037
6038
6039 switch (code) {
6040 case MBA_RESET: /* Reset */
6041 case MBA_SYSTEM_ERR: /* System Error */
6042 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
6043 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
6044 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
6045 "qla_target(%d): System error async event %#x "
6046 "occurred", vha->vp_idx, code);
6047 break;
6048 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
6049 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6050 break;
6051
6052 case MBA_LOOP_UP:
6053 {
6054 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
6055 "qla_target(%d): Async LOOP_UP occurred "
6056 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
6057 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6058 if (tgt->link_reinit_iocb_pending) {
6059 qlt_send_notify_ack(ha->base_qpair,
6060 &tgt->link_reinit_iocb,
6061 0, 0, 0, 0, 0, 0);
6062 tgt->link_reinit_iocb_pending = 0;
6063 }
6064 break;
6065 }
6066
6067 case MBA_LIP_OCCURRED:
6068 case MBA_LOOP_DOWN:
6069 case MBA_LIP_RESET:
6070 case MBA_RSCN_UPDATE:
6071 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
6072 "qla_target(%d): Async event %#x occurred "
6073 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6074 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6075 break;
6076
6077 case MBA_REJECTED_FCP_CMD:
6078 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
6079 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
6080 vha->vp_idx,
6081 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6082
6083 if (mailbox[3] == 1) {
6084 /* exchange starvation. */
6085 vha->hw->exch_starvation++;
6086 if (vha->hw->exch_starvation > 5) {
6087 ql_log(ql_log_warn, vha, 0xd03a,
6088 "Exchange starvation-. Resetting RISC\n");
6089
6090 vha->hw->exch_starvation = 0;
6091 if (IS_P3P_TYPE(vha->hw))
6092 set_bit(FCOE_CTX_RESET_NEEDED,
6093 &vha->dpc_flags);
6094 else
6095 set_bit(ISP_ABORT_NEEDED,
6096 &vha->dpc_flags);
6097 qla2xxx_wake_dpc(vha);
6098 }
6099 }
6100 break;
6101
6102 case MBA_PORT_UPDATE:
6103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
6104 "qla_target(%d): Port update async event %#x "
6105 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
6106 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6107 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6108
6109 login_code = mailbox[2];
6110 if (login_code == 0x4) {
6111 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
6112 "Async MB 2: Got PLOGI Complete\n");
6113 vha->hw->exch_starvation = 0;
6114 } else if (login_code == 0x7)
6115 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6116 "Async MB 2: Port Logged Out\n");
6117 break;
6118 default:
6119 break;
6120 }
6121
6122 }
6123
6124 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6125 uint16_t loop_id)
6126 {
6127 fc_port_t *fcport, *tfcp, *del;
6128 int rc;
6129 unsigned long flags;
6130 u8 newfcport = 0;
6131
6132 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6133 if (!fcport) {
6134 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6135 "qla_target(%d): Allocation of tmp FC port failed",
6136 vha->vp_idx);
6137 return NULL;
6138 }
6139
6140 fcport->loop_id = loop_id;
6141
6142 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6143 if (rc != QLA_SUCCESS) {
6144 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6145 "qla_target(%d): Failed to retrieve fcport "
6146 "information -- get_port_database() returned %x "
6147 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6148 kfree(fcport);
6149 return NULL;
6150 }
6151
6152 del = NULL;
6153 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6154 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6155
6156 if (tfcp) {
6157 tfcp->d_id = fcport->d_id;
6158 tfcp->port_type = fcport->port_type;
6159 tfcp->supported_classes = fcport->supported_classes;
6160 tfcp->flags |= fcport->flags;
6161 tfcp->scan_state = QLA_FCPORT_FOUND;
6162
6163 del = fcport;
6164 fcport = tfcp;
6165 } else {
6166 if (vha->hw->current_topology == ISP_CFG_F)
6167 fcport->flags |= FCF_FABRIC_DEVICE;
6168
6169 list_add_tail(&fcport->list, &vha->vp_fcports);
6170 if (!IS_SW_RESV_ADDR(fcport->d_id))
6171 vha->fcport_count++;
6172 fcport->login_gen++;
6173 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
6174 fcport->login_succ = 1;
6175 newfcport = 1;
6176 }
6177
6178 fcport->deleted = 0;
6179 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6180
6181 switch (vha->host->active_mode) {
6182 case MODE_INITIATOR:
6183 case MODE_DUAL:
6184 if (newfcport) {
6185 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6186 qla24xx_sched_upd_fcport(fcport);
6187 } else {
6188 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6189 "%s %d %8phC post gpsc fcp_cnt %d\n",
6190 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6191 qla24xx_post_gpsc_work(vha, fcport);
6192 }
6193 }
6194 break;
6195
6196 case MODE_TARGET:
6197 default:
6198 break;
6199 }
6200 if (del)
6201 qla2x00_free_fcport(del);
6202
6203 return fcport;
6204 }
6205
6206 /* Must be called under tgt_mutex */
6207 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6208 be_id_t s_id)
6209 {
6210 struct fc_port *sess = NULL;
6211 fc_port_t *fcport = NULL;
6212 int rc, global_resets;
6213 uint16_t loop_id = 0;
6214
6215 if (s_id.domain == 0xFF && s_id.area == 0xFC) {
6216 /*
6217 * This is the Domain Controller, so it should be
6218 * OK to drop SCSI commands from it.
6219 */
6220 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6221 "Unable to find initiator with S_ID %x:%x:%x",
6222 s_id.domain, s_id.area, s_id.al_pa);
6223 return NULL;
6224 }
6225
6226 mutex_lock(&vha->vha_tgt.tgt_mutex);
6227
6228 retry:
6229 global_resets =
6230 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6231
6232 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6233 if (rc != 0) {
6234 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6235
6236 ql_log(ql_log_info, vha, 0xf071,
6237 "qla_target(%d): Unable to find "
6238 "initiator with S_ID %x:%x:%x",
6239 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6240
6241 if (rc == -ENOENT) {
6242 qlt_port_logo_t logo;
6243
6244 logo.id = be_to_port_id(s_id);
6245 logo.cmd_count = 1;
6246 qlt_send_first_logo(vha, &logo);
6247 }
6248
6249 return NULL;
6250 }
6251
6252 fcport = qlt_get_port_database(vha, loop_id);
6253 if (!fcport) {
6254 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6255 return NULL;
6256 }
6257
6258 if (global_resets !=
6259 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6260 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6261 "qla_target(%d): global reset during session discovery "
6262 "(counter was %d, new %d), retrying", vha->vp_idx,
6263 global_resets,
6264 atomic_read(&vha->vha_tgt.
6265 qla_tgt->tgt_global_resets_count));
6266 goto retry;
6267 }
6268
6269 sess = qlt_create_sess(vha, fcport, true);
6270
6271 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6272
6273 return sess;
6274 }
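
/*
 * Editor's note: qlt_make_local_sess() above guards its slow
 * port-database lookup with a generation counter: it snapshots
 * tgt_global_resets_count before the work and retries from scratch if
 * the counter moved, because a global reset invalidates everything
 * learned in between. A standalone sketch of the pattern using C11
 * atomics, with hypothetical names:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int reset_generation;

static int slow_lookup(int id)
{
	/* long operation that a concurrent reset can invalidate */
	return id * 2;
}

static int lookup_with_retry(int id)
{
	int gen, result;

	do {
		gen = atomic_load(&reset_generation);
		result = slow_lookup(id);
		/* retry if a reset happened while we were looking */
	} while (gen != atomic_load(&reset_generation));

	return result;
}

int main(void)
{
	printf("result=%d\n", lookup_with_retry(21));
	return 0;
}
#endif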
6275
6276 static void qlt_abort_work(struct qla_tgt *tgt,
6277 struct qla_tgt_sess_work_param *prm)
6278 {
6279 struct scsi_qla_host *vha = tgt->vha;
6280 struct qla_hw_data *ha = vha->hw;
6281 struct fc_port *sess = NULL;
6282 unsigned long flags = 0, flags2 = 0;
6283 be_id_t s_id;
6284 int rc;
6285
6286 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6287
6288 if (tgt->tgt_stop)
6289 goto out_term2;
6290
6291 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
6292
6293 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6294 if (!sess) {
6295 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6296
6297 sess = qlt_make_local_sess(vha, s_id);
6298 /* sess has got an extra creation ref */
6299
6300 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6301 if (!sess)
6302 goto out_term2;
6303 } else {
6304 if (sess->deleted) {
6305 sess = NULL;
6306 goto out_term2;
6307 }
6308
6309 if (!kref_get_unless_zero(&sess->sess_kref)) {
6310 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6311 "%s: kref_get fail %8phC \n",
6312 __func__, sess->port_name);
6313 sess = NULL;
6314 goto out_term2;
6315 }
6316 }
6317
6318 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6319 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6320
6321 ha->tgt.tgt_ops->put_sess(sess);
6322
6323 if (rc != 0)
6324 goto out_term;
6325 return;
6326
6327 out_term2:
6328 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6329
6330 out_term:
6331 spin_lock_irqsave(&ha->hardware_lock, flags);
6332 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
6333 FCP_TMF_REJECTED, false);
6334 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6335 }
6336
6337 static void qlt_tmr_work(struct qla_tgt *tgt,
6338 struct qla_tgt_sess_work_param *prm)
6339 {
6340 struct atio_from_isp *a = &prm->tm_iocb2;
6341 struct scsi_qla_host *vha = tgt->vha;
6342 struct qla_hw_data *ha = vha->hw;
6343 struct fc_port *sess;
6344 unsigned long flags;
6345 be_id_t s_id;
6346 int rc;
6347 u64 unpacked_lun;
6348 int fn;
6349 void *iocb;
6350
6351 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6352
6353 if (tgt->tgt_stop)
6354 goto out_term2;
6355
6356 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
6357 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6358 if (!sess) {
6359 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6360
6361 sess = qlt_make_local_sess(vha, s_id);
6362 /* sess has got an extra creation ref */
6363
6364 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6365 if (!sess)
6366 goto out_term2;
6367 } else {
6368 if (sess->deleted) {
6369 goto out_term2;
6370 }
6371
6372 if (!kref_get_unless_zero(&sess->sess_kref)) {
6373 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
6374 "%s: kref_get fail %8phC\n",
6375 __func__, sess->port_name);
6376 goto out_term2;
6377 }
6378 }
6379
6380 iocb = a;
6381 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
6382 unpacked_lun =
6383 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
6384
6385 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6386 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6387
6388 ha->tgt.tgt_ops->put_sess(sess);
6389
6390 if (rc != 0)
6391 goto out_term;
6392 return;
6393
6394 out_term2:
6395 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6396 out_term:
6397 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
6398 }
6399
6400 static void qlt_sess_work_fn(struct work_struct *work)
6401 {
6402 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6403 struct scsi_qla_host *vha = tgt->vha;
6404 unsigned long flags;
6405
6406 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6407
6408 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6409 while (!list_empty(&tgt->sess_works_list)) {
6410 struct qla_tgt_sess_work_param *prm = list_entry(
6411 tgt->sess_works_list.next, typeof(*prm),
6412 sess_works_list_entry);
6413
6414 /*
6415 * This work can be scheduled on several CPUs at a time, so we
6416 * must delete the entry to eliminate double processing
6417 */
6418 list_del(&prm->sess_works_list_entry);
6419
6420 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6421
6422 switch (prm->type) {
6423 case QLA_TGT_SESS_WORK_ABORT:
6424 qlt_abort_work(tgt, prm);
6425 break;
6426 case QLA_TGT_SESS_WORK_TM:
6427 qlt_tmr_work(tgt, prm);
6428 break;
6429 default:
6430 BUG_ON(1);
6431 break;
6432 }
6433
6434 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6435
6436 kfree(prm);
6437 }
6438 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6439 }
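
/*
 * Editor's note: qlt_sess_work_fn() above pops one entry per lock hold
 * and unlinks it *before* dropping the lock, so a second worker running
 * the same function can never pick up the same entry. A standalone
 * sketch of the unlink-then-unlock rule (pthread mutex, hypothetical
 * names):
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <pthread.h>
#include <stdio.h>

struct item {
	int type;
	struct item *next;
};

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *work_list;	/* protected by work_lock */

static void worker(void)
{
	pthread_mutex_lock(&work_lock);
	while (work_list) {
		struct item *it = work_list;

		work_list = it->next;	/* unlink while still locked */
		pthread_mutex_unlock(&work_lock);

		printf("handling type %d\n", it->type);	/* off-lock */

		pthread_mutex_lock(&work_lock);
	}
	pthread_mutex_unlock(&work_lock);
}

int main(void)
{
	struct item a = { 1, NULL }, b = { 2, &a };

	work_list = &b;
	worker();
	return 0;
}
#endif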
6440
6441 /* Must be called under tgt_host_action_mutex */
6442 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6443 {
6444 struct qla_tgt *tgt;
6445 int rc, i;
6446 struct qla_qpair_hint *h;
6447
6448 if (!QLA_TGT_MODE_ENABLED())
6449 return 0;
6450
6451 if (!IS_TGT_MODE_CAPABLE(ha)) {
6452 ql_log(ql_log_warn, base_vha, 0xe070,
6453 "This adapter does not support target mode.\n");
6454 return 0;
6455 }
6456
6457 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6458 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6459
6460 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6461
6462 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6463 if (!tgt) {
6464 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6465 "Unable to allocate struct qla_tgt\n");
6466 return -ENOMEM;
6467 }
6468
6469 tgt->qphints = kcalloc(ha->max_qpairs + 1,
6470 sizeof(struct qla_qpair_hint),
6471 GFP_KERNEL);
6472 if (!tgt->qphints) {
6473 kfree(tgt);
6474 ql_log(ql_log_warn, base_vha, 0x0197,
6475 "Unable to allocate qpair hints.\n");
6476 return -ENOMEM;
6477 }
6478
6479 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6480 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6481
6482 rc = btree_init64(&tgt->lun_qpair_map);
6483 if (rc) {
6484 kfree(tgt->qphints);
6485 kfree(tgt);
6486 ql_log(ql_log_info, base_vha, 0x0198,
6487 "Unable to initialize lun_qpair_map btree\n");
6488 return -EIO;
6489 }
6490 h = &tgt->qphints[0];
6491 h->qpair = ha->base_qpair;
6492 INIT_LIST_HEAD(&h->hint_elem);
6493 h->cpuid = ha->base_qpair->cpuid;
6494 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6495
6496 for (i = 0; i < ha->max_qpairs; i++) {
6497 unsigned long flags;
6498
6499 struct qla_qpair *qpair = ha->queue_pair_map[i];
6500
6501 h = &tgt->qphints[i + 1];
6502 INIT_LIST_HEAD(&h->hint_elem);
6503 if (qpair) {
6504 h->qpair = qpair;
6505 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6506 list_add_tail(&h->hint_elem, &qpair->hints_list);
6507 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6508 h->cpuid = qpair->cpuid;
6509 }
6510 }
6511
6512 tgt->ha = ha;
6513 tgt->vha = base_vha;
6514 init_waitqueue_head(&tgt->waitQ);
6515 INIT_LIST_HEAD(&tgt->del_sess_list);
6516 spin_lock_init(&tgt->sess_work_lock);
6517 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6518 INIT_LIST_HEAD(&tgt->sess_works_list);
6519 atomic_set(&tgt->tgt_global_resets_count, 0);
6520
6521 base_vha->vha_tgt.qla_tgt = tgt;
6522
6523 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6524 "qla_target(%d): using 64 Bit PCI addressing",
6525 base_vha->vp_idx);
6526 /* 3 is reserved */
6527 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6528
6529 mutex_lock(&qla_tgt_mutex);
6530 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6531 mutex_unlock(&qla_tgt_mutex);
6532
6533 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6534 ha->tgt.tgt_ops->add_target(base_vha);
6535
6536 return 0;
6537 }
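
/*
 * Editor's note: qlt_add_target() above frees in reverse allocation
 * order on each failure path (qphints failure frees tgt; btree failure
 * frees qphints, then tgt). The equivalent and more scalable kernel
 * idiom is a goto-unwind ladder; a standalone sketch of it, with
 * hypothetical names:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto out;	/* nothing to undo yet */

	b = malloc(32);
	if (!b)
		goto free_a;	/* undo step 1 only */

	/* a third step would 'goto free_b' on failure, and so on */
	printf("setup ok\n");
	free(b);
	free(a);
	return 0;

free_a:
	free(a);
out:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
#endif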
6538
6539 /* Must be called under tgt_host_action_mutex */
6540 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6541 {
6542 if (!vha->vha_tgt.qla_tgt)
6543 return 0;
6544
6545 if (vha->fc_vport) {
6546 qlt_release(vha->vha_tgt.qla_tgt);
6547 return 0;
6548 }
6549
6550 /* free left over qfull cmds */
6551 qlt_init_term_exchange(vha);
6552
6553 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6554 vha->host_no, ha);
6555 qlt_release(vha->vha_tgt.qla_tgt);
6556
6557 return 0;
6558 }
6559
6560 void qla_remove_hostmap(struct qla_hw_data *ha)
6561 {
6562 struct scsi_qla_host *node;
6563 u32 key = 0;
6564
6565 btree_for_each_safe32(&ha->host_map, key, node)
6566 btree_remove32(&ha->host_map, key);
6567
6568 btree_destroy32(&ha->host_map);
6569 }
6570
6571 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6572 unsigned char *b)
6573 {
6574 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6575 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6576 put_unaligned_be64(wwpn, b);
6577 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
6578 }
6579
6580 /**
6581 * qlt_lport_register - register lport with external module
6582 *
6583 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6584 * @phys_wwpn: physical port WWPN
6585 * @npiv_wwpn: NPIV WWPN
6586 * @npiv_wwnn: NPIV WWNN
6587 * @callback: lport initialization callback for tcm_qla2xxx code
6588 */
6589 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6590 u64 npiv_wwpn, u64 npiv_wwnn,
6591 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6592 {
6593 struct qla_tgt *tgt;
6594 struct scsi_qla_host *vha;
6595 struct qla_hw_data *ha;
6596 struct Scsi_Host *host;
6597 unsigned long flags;
6598 int rc;
6599 u8 b[WWN_SIZE];
6600
6601 mutex_lock(&qla_tgt_mutex);
6602 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6603 vha = tgt->vha;
6604 ha = vha->hw;
6605
6606 host = vha->host;
6607 if (!host)
6608 continue;
6609
6610 if (!(host->hostt->supported_mode & MODE_TARGET))
6611 continue;
6612
6613 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6614 continue;
6615
6616 spin_lock_irqsave(&ha->hardware_lock, flags);
6617 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6618 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6619 host->host_no);
6620 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6621 continue;
6622 }
6623 if (tgt->tgt_stop) {
6624 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6625 host->host_no);
6626 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6627 continue;
6628 }
6629 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6630
6631 if (!scsi_host_get(host)) {
6632 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6633 "Unable to scsi_host_get() for"
6634 " qla2xxx scsi_host\n");
6635 continue;
6636 }
6637 qlt_lport_dump(vha, phys_wwpn, b);
6638
6639 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6640 scsi_host_put(host);
6641 continue;
6642 }
6643 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6644 if (rc != 0)
6645 scsi_host_put(host);
6646
6647 mutex_unlock(&qla_tgt_mutex);
6648 return rc;
6649 }
6650 mutex_unlock(&qla_tgt_mutex);
6651
6652 return -ENODEV;
6653 }
6654 EXPORT_SYMBOL(qlt_lport_register);
6655
6656 /**
6657 * qlt_lport_deregister - Deregister lport
6658 *
6659 * @vha: Registered scsi_qla_host pointer
6660 */
6661 void qlt_lport_deregister(struct scsi_qla_host *vha)
6662 {
6663 struct qla_hw_data *ha = vha->hw;
6664 struct Scsi_Host *sh = vha->host;
6665 /*
6666 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6667 */
6668 vha->vha_tgt.target_lport_ptr = NULL;
6669 ha->tgt.tgt_ops = NULL;
6670 /*
6671 * Release the Scsi_Host reference for the underlying qla2xxx host
6672 */
6673 scsi_host_put(sh);
6674 }
6675 EXPORT_SYMBOL(qlt_lport_deregister);
6676
6677 /* Must be called under HW lock */
6678 void qlt_set_mode(struct scsi_qla_host *vha)
6679 {
6680 switch (vha->qlini_mode) {
6681 case QLA2XXX_INI_MODE_DISABLED:
6682 case QLA2XXX_INI_MODE_EXCLUSIVE:
6683 vha->host->active_mode = MODE_TARGET;
6684 break;
6685 case QLA2XXX_INI_MODE_ENABLED:
6686 vha->host->active_mode = MODE_INITIATOR;
6687 break;
6688 case QLA2XXX_INI_MODE_DUAL:
6689 vha->host->active_mode = MODE_DUAL;
6690 break;
6691 default:
6692 break;
6693 }
6694 }
6695
6696 /* Must be called under HW lock */
6697 static void qlt_clear_mode(struct scsi_qla_host *vha)
6698 {
6699 switch (vha->qlini_mode) {
6700 case QLA2XXX_INI_MODE_DISABLED:
6701 vha->host->active_mode = MODE_UNKNOWN;
6702 break;
6703 case QLA2XXX_INI_MODE_EXCLUSIVE:
6704 vha->host->active_mode = MODE_INITIATOR;
6705 break;
6706 case QLA2XXX_INI_MODE_ENABLED:
6707 case QLA2XXX_INI_MODE_DUAL:
6708 vha->host->active_mode = MODE_INITIATOR;
6709 break;
6710 default:
6711 break;
6712 }
6713 }
6714
6715 /*
6716 * qla_tgt_enable_vha - NO LOCK HELD
6717 *
6718 * host_reset, bring up w/ Target Mode Enabled
6719 */
6720 void
6721 qlt_enable_vha(struct scsi_qla_host *vha)
6722 {
6723 struct qla_hw_data *ha = vha->hw;
6724 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6725 unsigned long flags;
6726 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6727
6728 if (!tgt) {
6729 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6730 "Unable to locate qla_tgt pointer from"
6731 " struct qla_hw_data\n");
6732 dump_stack();
6733 return;
6734 }
6735 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6736 return;
6737
6738 if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6739 ha->tgt.num_act_qpairs = ha->max_qpairs;
6740 spin_lock_irqsave(&ha->hardware_lock, flags);
6741 tgt->tgt_stopped = 0;
6742 qlt_set_mode(vha);
6743 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6744
6745 mutex_lock(&ha->optrom_mutex);
6746 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6747 "%s.\n", __func__);
6748 if (vha->vp_idx) {
6749 qla24xx_disable_vp(vha);
6750 qla24xx_enable_vp(vha);
6751 } else {
6752 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6753 qla2xxx_wake_dpc(base_vha);
6754 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6755 QLA_SUCCESS);
6756 }
6757 mutex_unlock(&ha->optrom_mutex);
6758 }
6759 EXPORT_SYMBOL(qlt_enable_vha);
6760
6761 /*
6762 * qla_tgt_disable_vha - NO LOCK HELD
6763 *
6764 * Disable Target Mode and reset the adapter
6765 */
6766 static void qlt_disable_vha(struct scsi_qla_host *vha)
6767 {
6768 struct qla_hw_data *ha = vha->hw;
6769 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6770 unsigned long flags;
6771
6772 if (!tgt) {
6773 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6774 "Unable to locate qla_tgt pointer from"
6775 " struct qla_hw_data\n");
6776 dump_stack();
6777 return;
6778 }
6779
6780 spin_lock_irqsave(&ha->hardware_lock, flags);
6781 qlt_clear_mode(vha);
6782 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6783
6784 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6785 qla2xxx_wake_dpc(vha);
6786
6787 /*
6788 * We are expecting the offline state.
6789 * QLA_FUNCTION_FAILED means that the adapter is offline.
6790 */
6791 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6792 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6793 "adapter is offline\n");
6794 }
6795
6796 /*
6797 * Called from qla_init.c:qla24xx_vport_create() context to set up
6798 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6799 * members.
6800 */
6801 void
6802 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6803 {
6804 vha->vha_tgt.qla_tgt = NULL;
6805
6806 mutex_init(&vha->vha_tgt.tgt_mutex);
6807 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6808
6809 qlt_clear_mode(vha);
6810
6811 /*
6812 * NOTE: Currently the value is kept the same for <24xx and
6813 * >=24xx ISPs. If it is necessary to change it,
6814 * a check should be added for specific ISPs,
6815 * assigning the value appropriately.
6816 */
6817 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6818
6819 qlt_add_target(ha, vha);
6820 }
6821
6822 u8
6823 qlt_rff_id(struct scsi_qla_host *vha)
6824 {
6825 u8 fc4_feature = 0;
6826 /*
6827 * FC-4 Feature bit 0 indicates target functionality to the name server.
6828 */
6829 if (qla_tgt_mode_enabled(vha)) {
6830 fc4_feature = BIT_0;
6831 } else if (qla_ini_mode_enabled(vha)) {
6832 fc4_feature = BIT_1;
6833 } else if (qla_dual_mode_enabled(vha))
6834 fc4_feature = BIT_0 | BIT_1;
6835
6836 return fc4_feature;
6837 }
6838
6839 /*
6840 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6841 * @vha: HA context
6842 *
6843 * Beginning of ATIO ring has initialization control block already built
6844 * by nvram config routine.
6847 */
6848 void
6849 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6850 {
6851 struct qla_hw_data *ha = vha->hw;
6852 uint16_t cnt;
6853 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6854
6855 if (qla_ini_mode_enabled(vha))
6856 return;
6857
6858 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6859 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6860 pkt++;
6861 }
6862
6863 }
6864
6865 /*
6866 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6867 * @vha: SCSI driver HA context
6868 */
6869 void
6870 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6871 {
6872 struct qla_hw_data *ha = vha->hw;
6873 struct atio_from_isp *pkt;
6874 int cnt, i;
6875
6876 if (!ha->flags.fw_started)
6877 return;
6878
6879 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6880 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6881 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6882 cnt = pkt->u.raw.entry_count;
6883
6884 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6885 /*
6886 * This packet is corrupted. The header + payload
6887 * cannot be trusted. There is no point in passing
6888 * it further up.
6889 */
6890 ql_log(ql_log_warn, vha, 0xd03c,
6891 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6892 &pkt->u.isp24.fcp_hdr.s_id,
6893 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6894 pkt->u.isp24.exchange_addr, pkt);
6895
6896 adjust_corrupted_atio(pkt);
6897 qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
6898 ha_locked, 0);
6899 } else {
6900 qlt_24xx_atio_pkt_all_vps(vha,
6901 (struct atio_from_isp *)pkt, ha_locked);
6902 }
6903
6904 for (i = 0; i < cnt; i++) {
6905 ha->tgt.atio_ring_index++;
6906 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6907 ha->tgt.atio_ring_index = 0;
6908 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6909 } else
6910 ha->tgt.atio_ring_ptr++;
6911
6912 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6913 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6914 }
6915 wmb();
6916 }
6917
6918 /* Adjust ring index */
6919 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6920 }
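
/*
 * Editor's note: the ATIO loop above is a single-consumer ring walk:
 * each packet advertises how many ring entries it spans (entry_count),
 * the consumer index advances by that many slots with wrap-around,
 * consumed slots are re-marked as processed, and the new index is
 * published to the hardware once per batch. A standalone sketch of the
 * index arithmetic, with hypothetical names:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdio.h>

#define RING_LEN 8

struct entry {
	int valid;		/* stands in for the signature check */
	int entry_count;	/* ring slots this packet occupies */
};

static struct entry ring[RING_LEN];
static unsigned int out_index;	/* consumer index, published last */

static void process_ring(void)
{
	while (ring[out_index].valid) {
		int cnt = ring[out_index].entry_count, i;

		printf("packet at %u spans %d slot(s)\n", out_index, cnt);

		for (i = 0; i < cnt; i++) {
			ring[out_index].valid = 0;	/* mark consumed */
			out_index = (out_index + 1) % RING_LEN;
		}
	}
	/* the real driver writes out_index to ISP_ATIO_Q_OUT here */
}

int main(void)
{
	ring[0] = (struct entry){ 1, 2 };	/* two-slot packet */
	ring[2] = (struct entry){ 1, 1 };
	process_ring();
	return 0;
}
#endif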
6921
6922 void
6923 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6924 {
6925 struct qla_hw_data *ha = vha->hw;
6926 struct qla_msix_entry *msix = &ha->msix_entries[2];
6927 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6928
6929 if (!QLA_TGT_MODE_ENABLED())
6930 return;
6931
6932 wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6933 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6934 rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6935
6936 if (ha->flags.msix_enabled) {
6937 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6938 icb->msix_atio = cpu_to_le16(msix->entry);
6939 icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
6940 ql_dbg(ql_dbg_init, vha, 0xf072,
6941 "Registering ICB vector 0x%x for atio que.\n",
6942 msix->entry);
6943 }
6944 } else {
6945 /* INTx|MSI */
6946 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6947 icb->msix_atio = 0;
6948 icb->firmware_options_2 |= cpu_to_le32(BIT_26);
6949 ql_dbg(ql_dbg_init, vha, 0xf072,
6950 "%s: Use INTx for ATIOQ.\n", __func__);
6951 }
6952 }
6953 }
6954
6955 void
6956 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6957 {
6958 struct qla_hw_data *ha = vha->hw;
6959 u32 tmp;
6960
6961 if (!QLA_TGT_MODE_ENABLED())
6962 return;
6963
6964 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6965 if (!ha->tgt.saved_set) {
6966 /* We save only once */
6967 ha->tgt.saved_exchange_count = nv->exchange_count;
6968 ha->tgt.saved_firmware_options_1 =
6969 nv->firmware_options_1;
6970 ha->tgt.saved_firmware_options_2 =
6971 nv->firmware_options_2;
6972 ha->tgt.saved_firmware_options_3 =
6973 nv->firmware_options_3;
6974 ha->tgt.saved_set = 1;
6975 }
6976
6977 if (qla_tgt_mode_enabled(vha))
6978 nv->exchange_count = cpu_to_le16(0xFFFF);
6979 else /* dual */
6980 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6981
6982 /* Enable target mode */
6983 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6984
6985 /* Disable ini mode, if requested */
6986 if (qla_tgt_mode_enabled(vha))
6987 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6988
6989 /* Disable Full Login after LIP */
6990 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6991 /* Enable initial LIP */
6992 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6993 if (ql2xtgt_tape_enable)
6994 /* Enable FC Tape support */
6995 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6996 else
6997 /* Disable FC Tape support */
6998 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6999
7000 /* Disable Full Login after LIP */
7001 nv->host_p &= cpu_to_le32(~BIT_10);
7002
7003 /*
7004 * clear BIT 15 explicitly as we have seen at least
7005 * a couple of instances where this was set and this
7006 * was causing the firmware to not be initialized.
7007 */
7008 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
7009 /* Enable target PRLI control */
7010 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
7011
7012 if (IS_QLA25XX(ha)) {
7013 /* Change Loop-prefer to Pt-Pt */
7014 tmp = ~(BIT_4|BIT_5|BIT_6);
7015 nv->firmware_options_2 &= cpu_to_le32(tmp);
7016 tmp = P2P << 4;
7017 nv->firmware_options_2 |= cpu_to_le32(tmp);
7018 }
7019 } else {
7020 if (ha->tgt.saved_set) {
7021 nv->exchange_count = ha->tgt.saved_exchange_count;
7022 nv->firmware_options_1 =
7023 ha->tgt.saved_firmware_options_1;
7024 nv->firmware_options_2 =
7025 ha->tgt.saved_firmware_options_2;
7026 nv->firmware_options_3 =
7027 ha->tgt.saved_firmware_options_3;
7028 }
7029 return;
7030 }
7031
7032 if (ha->base_qpair->enable_class_2) {
7033 if (vha->flags.init_done)
7034 fc_host_supported_classes(vha->host) =
7035 FC_COS_CLASS2 | FC_COS_CLASS3;
7036
7037 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
7038 } else {
7039 if (vha->flags.init_done)
7040 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7041
7042 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
7043 }
7044 }
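
/*
 * Editor's note: both config_nvram_stage1 routines use a save-once /
 * restore-on-disable idiom: the first time target mode rewrites the
 * NVRAM-derived options, the pristine values are stashed behind a
 * saved_set flag, and disabling target mode later restores them
 * exactly. A standalone sketch, with hypothetical names:
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <stdio.h>

struct opts {
	unsigned int fw_options;
};

static struct opts saved;
static int saved_set;		/* guards the one-time save */

static void enable_feature(struct opts *o)
{
	if (!saved_set) {	/* save only once, before first rewrite */
		saved = *o;
		saved_set = 1;
	}
	o->fw_options |= 0x10;	/* feature-specific rewrites go here */
}

static void disable_feature(struct opts *o)
{
	if (saved_set)
		*o = saved;	/* restore the pristine values */
}

int main(void)
{
	struct opts o = { 0x3 };

	enable_feature(&o);
	enable_feature(&o);	/* a second enable must not re-save */
	disable_feature(&o);
	printf("fw_options=0x%x\n", o.fw_options);	/* 0x3 again */
	return 0;
}
#endif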
7045
7046 void
7047 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
7048 struct init_cb_24xx *icb)
7049 {
7050 struct qla_hw_data *ha = vha->hw;
7051
7052 if (!QLA_TGT_MODE_ENABLED())
7053 return;
7054
7055 if (ha->tgt.node_name_set) {
7056 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7057 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7058 }
7059 }
7060
7061 void
7062 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
7063 {
7064 struct qla_hw_data *ha = vha->hw;
7065 u32 tmp;
7066
7067 if (!QLA_TGT_MODE_ENABLED())
7068 return;
7069
7070 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
7071 if (!ha->tgt.saved_set) {
7072 /* We save only once */
7073 ha->tgt.saved_exchange_count = nv->exchange_count;
7074 ha->tgt.saved_firmware_options_1 =
7075 nv->firmware_options_1;
7076 ha->tgt.saved_firmware_options_2 =
7077 nv->firmware_options_2;
7078 ha->tgt.saved_firmware_options_3 =
7079 nv->firmware_options_3;
7080 ha->tgt.saved_set = 1;
7081 }
7082
7083 if (qla_tgt_mode_enabled(vha))
7084 nv->exchange_count = cpu_to_le16(0xFFFF);
7085 else /* dual */
7086 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
7087
7088 /* Enable target mode */
7089 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
7090
7091 /* Disable ini mode, if requested */
7092 if (qla_tgt_mode_enabled(vha))
7093 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
7094 /* Disable Full Login after LIP */
7095 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7096 /* Enable initial LIP */
7097 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
7098 /*
7099 * clear BIT 15 explicitly as we have seen at
7100 * least a couple of instances where this was set
7101 * and this was causing the firmware to not be
7102 * initialized.
7103 */
7104 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
7105 if (ql2xtgt_tape_enable)
7106 /* Enable FC tape support */
7107 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7108 else
7109 /* Disable FC tape support */
7110 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
7111
7112 /* Disable Full Login after LIP */
7113 nv->host_p &= cpu_to_le32(~BIT_10);
7114 /* Enable target PRLI control */
7115 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
7116
7117 /* Change Loop-prefer to Pt-Pt */
7118 tmp = ~(BIT_4|BIT_5|BIT_6);
7119 nv->firmware_options_2 &= cpu_to_le32(tmp);
7120 tmp = P2P << 4;
7121 nv->firmware_options_2 |= cpu_to_le32(tmp);
7122 } else {
7123 if (ha->tgt.saved_set) {
7124 nv->exchange_count = ha->tgt.saved_exchange_count;
7125 nv->firmware_options_1 =
7126 ha->tgt.saved_firmware_options_1;
7127 nv->firmware_options_2 =
7128 ha->tgt.saved_firmware_options_2;
7129 nv->firmware_options_3 =
7130 ha->tgt.saved_firmware_options_3;
7131 }
7132 return;
7133 }
7134
7135 if (ha->base_qpair->enable_class_2) {
7136 if (vha->flags.init_done)
7137 fc_host_supported_classes(vha->host) =
7138 FC_COS_CLASS2 | FC_COS_CLASS3;
7139
7140 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
7141 } else {
7142 if (vha->flags.init_done)
7143 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7144
7145 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
7146 }
7147 }
7148
7149 void
7150 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7151 struct init_cb_81xx *icb)
7152 {
7153 struct qla_hw_data *ha = vha->hw;
7154
7155 if (!QLA_TGT_MODE_ENABLED())
7156 return;
7157
7158 if (ha->tgt.node_name_set) {
7159 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7160 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7161 }
7162 }
7163
7164 void
7165 qlt_83xx_iospace_config(struct qla_hw_data *ha)
7166 {
7167 if (!QLA_TGT_MODE_ENABLED())
7168 return;
7169
7170 ha->msix_count += 1; /* For ATIO Q */
7171 }
7172
7173
7174 void
7175 qlt_modify_vp_config(struct scsi_qla_host *vha,
7176 struct vp_config_entry_24xx *vpmod)
7177 {
7178 /* enable target mode. Bit5 = 1 => disable */
7179 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7180 vpmod->options_idx1 &= ~BIT_5;
7181
7182 /* Disable ini mode, if requested. bit4 = 1 => disable */
7183 if (qla_tgt_mode_enabled(vha))
7184 vpmod->options_idx1 &= ~BIT_4;
7185 }
7186
7187 void
7188 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7189 {
7190 mutex_init(&base_vha->vha_tgt.tgt_mutex);
7191 if (!QLA_TGT_MODE_ENABLED())
7192 return;
7193
7194 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
7195 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7196 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7197 } else {
7198 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7199 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7200 }
7201
7202 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7203
7204 INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7205 INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7206 qlt_unknown_atio_work_fn);
7207
7208 qlt_clear_mode(base_vha);
7209
7210 qlt_update_vp_map(base_vha, SET_VP_IDX);
7211 }
7212
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}
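
/*
 * Deferred half of ABTS receive handling.  Bail out if the chip was
 * reset after the work was queued (op->chip_reset is a snapshot taken
 * at queueing time).  The ATIO queue is drained first, presumably so
 * that commands which arrived ahead of the ABTS are seen before the
 * abort itself is pushed through the response-packet path.
 */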
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
	    struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset)) {
		kfree(op);	/* don't leak the deferred op on the reset path */
		return;
	}

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}
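
/*
 * Interrupt-context entry point for a received ABTS: snapshot the
 * packet and the current chip_reset generation, then defer the real
 * work to qla_tgt_wq.  If the allocation fails, fall through to the
 * normal response-packet path as best-effort recovery.
 */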
void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
    response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (!op) {
		/*
		 * Do not reach for the ATIO queue here; this is
		 * best-effort error recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}
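
/*
 * Allocate the per-HBA target-mode resources: the vp_idx -> vha lookup
 * table and the DMA-coherent ATIO ring (atio_q_length entries plus one
 * extra slot).  Returns 0 on success or -ENOMEM; a no-op returning 0
 * when target mode is compiled out.
 */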
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
	    sizeof(struct qla_tgt_vp_map), GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		ha->tgt.tgt_vp_map = NULL;	/* avoid double free in qlt_mem_free() */
		return -ENOMEM;
	}
	return 0;
}
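
/*
 * Release everything qlt_mem_alloc() set up.  Safe to call when the
 * ring was never allocated: the dma_free_coherent() is guarded and
 * the pointers are cleared afterwards.
 */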
void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	kfree(ha->tgt.tgt_vp_map);
	ha->tgt.tgt_vp_map = NULL;
}
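
/*
 * Maintain the two reverse-lookup structures used to route incoming
 * exchanges to the owning scsi_qla_host: tgt_vp_map[] indexed by
 * vp_idx (SET_VP_IDX/RESET_VP_IDX) and the host_map btree keyed by
 * the 24-bit port ID in d_id (SET_AL_PA/RESET_AL_PA).
 */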
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->host_map,
			    key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->host_map, key, vha);
		break;
	case RESET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (slot)
			btree_remove32(&vha->hw->host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}
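
/*
 * Record a (possibly changed) port ID for this vha: drop any stale
 * host_map entry for the old d_id before installing the new one.
 */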
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}
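
/*
 * Translate the qlini_mode module parameter into one of the
 * QLA2XXX_INI_MODE_* constants; returns false on an unrecognized
 * string.  Illustrative use (module load time):
 *
 *	modprobe qla2xxx qlini_mode="disabled"
 *
 * which keeps initiator mode off so the port can run as a pure target.
 */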
static bool __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}
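
/*
 * Module-init-time setup for the target side: validate qlini_mode,
 * then create the slab caches, the management-command mempool and the
 * qla_tgt_wq workqueue, unwinding in reverse order on any failure.
 * The BUILD_BUG_ON()s pin both CTIO IOCB layouts to the 64-byte entry
 * size the firmware interface requires.
 */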
int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd),
	    __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t),
	    __alignof__(struct qlt_plogi_ack_t), 0, NULL);
	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
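
/*
 * Module-exit counterpart of qlt_init(): tear down the workqueue,
 * mempool and slab caches in the reverse order of their creation.
 */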
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}