1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
4 *
5 * based on qla2x00t.c code:
6 *
7 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
8 * Copyright (C) 2004 - 2005 Leonid Stoljar
9 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
10 * Copyright (C) 2006 - 2010 ID7 Ltd.
11 *
12 * Forward port and refactoring to modern qla2xxx and target/configfs
13 *
14 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
15 */
16
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/types.h>
20 #include <linux/blkdev.h>
21 #include <linux/interrupt.h>
22 #include <linux/pci.h>
23 #include <linux/delay.h>
24 #include <linux/list.h>
25 #include <linux/workqueue.h>
26 #include <asm/unaligned.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_tcq.h>
30
31 #include "qla_def.h"
32 #include "qla_target.h"
33
34 static int ql2xtgt_tape_enable;
35 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
36 MODULE_PARM_DESC(ql2xtgt_tape_enable,
37 "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
38
39 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
40 module_param(qlini_mode, charp, S_IRUGO);
41 MODULE_PARM_DESC(qlini_mode,
42 "Determines when initiator mode will be enabled. Possible values: "
43 "\"exclusive\" - initiator mode will be enabled on load, "
44 "disabled on enabling target mode and then on disabling target mode "
45 "enabled back; "
46 "\"disabled\" - initiator mode will never be enabled; "
47 "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
48 "when ready "
49 "\"enabled\" (default) - initiator mode will always stay enabled.");
50
51 int ql2xuctrlirq = 1;
52 module_param(ql2xuctrlirq, int, 0644);
53 MODULE_PARM_DESC(ql2xuctrlirq,
54 "User to control IRQ placement via smp_affinity."
55 "Valid with qlini_mode=disabled."
56 "1(default): enable");
57
58 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
59
60 static int qla_sam_status = SAM_STAT_BUSY;
61 static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
62
63 /*
64 * From scsi/fc/fc_fcp.h
65 */
66 enum fcp_resp_rsp_codes {
67 FCP_TMF_CMPL = 0,
68 FCP_DATA_LEN_INVALID = 1,
69 FCP_CMND_FIELDS_INVALID = 2,
70 FCP_DATA_PARAM_MISMATCH = 3,
71 FCP_TMF_REJECTED = 4,
72 FCP_TMF_FAILED = 5,
73 FCP_TMF_INVALID_LUN = 9,
74 };
75
76 /*
77 * fc_pri_ta from scsi/fc/fc_fcp.h
78 */
79 #define FCP_PTA_SIMPLE 0 /* simple task attribute */
80 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */
81 #define FCP_PTA_ORDERED 2 /* ordered task attribute */
82 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */
83 #define FCP_PTA_MASK 7 /* mask for task attribute field */
84 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
85 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
86
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and could unlock/relock it inside.
 * That isn't an issue, since in the current implementation, at the time
 * those functions are called:
 *
 * - either the context is IRQ and only the IRQ handler can modify HW data,
 * including rings-related fields,
 *
 * - or access to target mode variables from struct qla_tgt doesn't
 * cross those functions' boundaries, except tgt_stop, which is
 * additionally protected by irq_cmd_count.
 */
100 /* Predefs for callbacks handed to qla2xxx LLD */
101 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
102 struct atio_from_isp *pkt, uint8_t);
103 static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
104 response_t *pkt);
105 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
106 int fn, void *iocb, int flags);
107 static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
109 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
110 struct atio_from_isp *atio, uint16_t status, int qfull);
111 static void qlt_disable_vha(struct scsi_qla_host *vha);
112 static void qlt_clear_tgt_db(struct qla_tgt *tgt);
113 static void qlt_send_notify_ack(struct qla_qpair *qpair,
114 struct imm_ntfy_from_isp *ntfy,
115 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
116 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
117 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *imm, int ha_locked);
119 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
120 fc_port_t *fcport, bool local);
121 void qlt_unreg_sess(struct fc_port *sess);
122 static void qlt_24xx_handle_abts(struct scsi_qla_host *,
123 struct abts_recv_from_24xx *);
124 static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
125 uint16_t);
126 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
127 static inline uint32_t qlt_make_handle(struct qla_qpair *);
128
129 /*
130 * Global Variables
131 */
132 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
133 struct kmem_cache *qla_tgt_plogi_cachep;
134 static mempool_t *qla_tgt_mgmt_cmd_mempool;
135 static struct workqueue_struct *qla_tgt_wq;
136 static DEFINE_MUTEX(qla_tgt_mutex);
137 static LIST_HEAD(qla_tgt_glist);
138
/* Map a target-core protection op code to a printable name for logging. */
static const char *prot_op_str(u32 prot_op)
{
	static const struct {
		u32 op;
		const char *name;
	} names[] = {
		{ TARGET_PROT_NORMAL,		"NORMAL" },
		{ TARGET_PROT_DIN_INSERT,	"DIN_INSERT" },
		{ TARGET_PROT_DOUT_INSERT,	"DOUT_INSERT" },
		{ TARGET_PROT_DIN_STRIP,	"DIN_STRIP" },
		{ TARGET_PROT_DOUT_STRIP,	"DOUT_STRIP" },
		{ TARGET_PROT_DIN_PASS,		"DIN_PASS" },
		{ TARGET_PROT_DOUT_PASS,	"DOUT_PASS" },
	};
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (names[i].op == prot_op)
			return names[i].name;
	}

	return "UNKNOWN";
}
152
153 /* This API intentionally takes dest as a parameter, rather than returning
154 * int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	/* The generation counter lives on the base (physical) port. */
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
162
/* Might release hw lock, then reacquire!! */
qlt_issue_marker(struct scsi_qla_host * vha,int vha_locked)164 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
165 {
166 /* Send marker if required */
167 if (unlikely(vha->marker_needed != 0)) {
168 int rc = qla2x00_issue_marker(vha, vha_locked);
169
170 if (rc != QLA_SUCCESS) {
171 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
172 "qla_target(%d): issue_marker() failed\n",
173 vha->vp_idx);
174 }
175 return rc;
176 }
177 return QLA_SUCCESS;
178 }
179
/* Resolve a big-endian destination port id to the owning scsi_qla_host. */
struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	/* Fast path: the destination id is this port's own id. */
	if (d_id.domain == vha->d_id.b.domain &&
	    d_id.area == vha->d_id.b.area &&
	    d_id.al_pa == vha->d_id.b.al_pa)
		return vha;

	/* Otherwise look the 24-bit port id up in the per-HBA btree map. */
	key = be_to_port_id(d_id).b24;
	host = btree_lookup32(&vha->hw->host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}
200
201 static inline
qlt_find_host_by_vp_idx(struct scsi_qla_host * vha,uint16_t vp_idx)202 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
203 uint16_t vp_idx)
204 {
205 struct qla_hw_data *ha = vha->hw;
206
207 if (vha->vp_idx == vp_idx)
208 return vha;
209
210 BUG_ON(ha->tgt.tgt_vp_map == NULL);
211 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
212 return ha->tgt.tgt_vp_map[vp_idx].vha;
213
214 return NULL;
215 }
216
qlt_incr_num_pend_cmds(struct scsi_qla_host * vha)217 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
218 {
219 unsigned long flags;
220
221 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
222
223 vha->hw->tgt.num_pend_cmds++;
224 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
225 vha->qla_stats.stat_max_pend_cmds =
226 vha->hw->tgt.num_pend_cmds;
227 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
228 }
qlt_decr_num_pend_cmds(struct scsi_qla_host * vha)229 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
230 {
231 unsigned long flags;
232
233 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
234 vha->hw->tgt.num_pend_cmds--;
235 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
236 }
237
238
/*
 * Park an ATIO whose destination id could not be resolved to a host yet;
 * the delayed work will retry dispatching it. If the target is stopping
 * or memory is unavailable, the exchange is terminated instead.
 */
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_tgt_sess_op *op;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio,
		    ha_locked, 0);
		return;
	}

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (op == NULL) {
		/* No memory to park the ATIO - terminate the exchange. */
		qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio,
		    ha_locked, 0);
		return;
	}

	op->vha = vha;
	memcpy(&op->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&op->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&op->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	/* Retry dispatch shortly, once the owning host may be known. */
	schedule_delayed_work(&vha->unknown_atio_work, 1);
}
274
/*
 * Walk vha->unknown_atio_list and try to dispatch each parked ATIO to the
 * scsi_qla_host that owns its destination id. Aborted entries and entries
 * whose target is stopping are terminated; still-unresolved entries stay
 * queued and the delayed work is rescheduled once.
 *
 * NOTE(review): the list is walked without cmd_list_lock; only list_del()
 * is done under it - presumably safe because this only runs from the
 * single unknown_atio delayed work. TODO confirm.
 */
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			/* Owner resolvable now - hand the ATIO over. */
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			/* Target going away - terminate instead of retrying. */
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			/* Still unresolved: leave it queued, reschedule once. */
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		/* Entry was dispatched or terminated - unlink and free it. */
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}
323
qlt_unknown_atio_work_fn(struct work_struct * work)324 void qlt_unknown_atio_work_fn(struct work_struct *work)
325 {
326 struct scsi_qla_host *vha = container_of(to_delayed_work(work),
327 struct scsi_qla_host, unknown_atio_work);
328
329 qlt_try_to_dequeue_unknown_atios(vha, 0);
330 }
331
/*
 * Route an incoming ATIO-queue entry to the scsi_qla_host (physical or
 * virtual port) that should process it, keyed on the entry type.
 * Always returns false (NOTE(review): return value appears unused by
 * callers visible here - confirm before relying on it).
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
	    "%s: qla_target(%d): type %x ox_id %04x\n",
	    __func__, vha->vp_idx, atio->u.raw.entry_type,
	    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		/* New FCP command: route by destination (d_id) port id. */
		struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			/* Park it; a port for this d_id may appear later. */
			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		/* Retry previously parked ATIOs before the new one. */
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		/* Immediate notify: route by vp_index when one is given. */
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		/* Report-ID acquisition IOCB for a virtual port. */
		qla24xx_report_id_acquisition(vha,
		    (struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		/* ABTS: handler requires hardware_lock - take it if needed. */
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
428
/*
 * Route a response-queue entry to the scsi_qla_host (physical or virtual
 * port) that owns it, keyed on the entry type and its vp_index field.
 * Entries whose vp_index does not map to a known host are dropped with a
 * debug message; unrecognized entry types go to this vha's handler.
 */
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
		    "qla_target(%d):%s: CRC2 Response pkt\n",
		    vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		/* vp_index == 0xFF means the ack is for the base port. */
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			/*
			 * Message fixed to name the actual entry type; it
			 * previously said ABTS_RECV_24XX (copy-paste).
			 */
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}
533
534 /*
535 * All qlt_plogi_ack_t operations are protected by hardware_lock
536 */
/* Queue a QLA_EVT_NACK work item carrying a copy of the notify IOCB. */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e = qla2x00_alloc_work(vha, QLA_EVT_NACK);

	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));

	return qla2x00_post_work(vha, e);
}
551
/*
 * Completion callback for an async NACK srb (PLOGI/PRLI/LOGO acks).
 * Advances the fcport login state machine according to which NACK just
 * completed, then drops the srb's final reference.
 */
static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		/* PLOGI acked: bump login generation, arm nack deadline. */
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;

		if (sp->fcport->flags & FCF_FCSP_DEVICE) {
			/* FC-SP (EDIF) port: hold in auth-pending state. */
			ql_dbg(ql_dbg_edif, vha, 0x20ef,
			    "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
			    sp->fcport->port_name);
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_AUTH_PEND);
			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    sp->fcport->d_id.b24);
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
			    0, sp->fcport);
		}
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			/* First successful login for this port. */
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			/*
			 * sess_lock is dropped around the fcport update;
			 * NOTE(review): presumably qla24xx_sched_upd_fcport()
			 * must not run with it held - confirm at callee.
			 */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* Final reference: releases the srb. */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
618
/*
 * Start an asynchronous notify-ack (NACK) for an immediate-notify IOCB.
 * @type selects SRB_NACK_PLOGI, SRB_NACK_PRLI or SRB_NACK_LOGO; the
 * fcport firmware login state is moved to the matching "pending" state
 * before the srb is started.
 *
 * Returns QLA_SUCCESS if the srb was started, QLA_FUNCTION_FAILED
 * otherwise (FCF_ASYNC_SENT is cleared again on failure).
 *
 * NOTE(review): only the ntfy pointer is stored in the srb, so the
 * buffer must stay valid until the ack completes - verify at callers.
 */
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;	/* name of the ELS being acked, for logging */

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		/* EDIF-capable peer announced FC-SP: remember it. */
		if (vha->hw->flags.edif_enabled &&
		    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
			fcport->flags |= FCF_FCSP_DEVICE;
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
	    qla2x00_async_nack_sp_done);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* Drop the reference taken by qla2x00_get_sp(). */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
673
qla24xx_do_nack_work(struct scsi_qla_host * vha,struct qla_work_evt * e)674 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
675 {
676 fc_port_t *t;
677
678 switch (e->u.nack.type) {
679 case SRB_NACK_PRLI:
680 t = e->u.nack.fcport;
681 flush_work(&t->del_work);
682 flush_work(&t->free_work);
683 mutex_lock(&vha->vha_tgt.tgt_mutex);
684 t = qlt_create_sess(vha, e->u.nack.fcport, 0);
685 mutex_unlock(&vha->vha_tgt.tgt_mutex);
686 if (t) {
687 ql_log(ql_log_info, vha, 0xd034,
688 "%s create sess success %p", __func__, t);
689 /* create sess has an extra kref */
690 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
691 }
692 break;
693 }
694 qla24xx_async_notify_ack(vha, e->u.nack.fcport,
695 (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
696 }
697
qla24xx_delete_sess_fn(struct work_struct * work)698 void qla24xx_delete_sess_fn(struct work_struct *work)
699 {
700 fc_port_t *fcport = container_of(work, struct fc_port, del_work);
701 struct qla_hw_data *ha = NULL;
702
703 if (!fcport || !fcport->vha || !fcport->vha->hw)
704 return;
705
706 ha = fcport->vha->hw;
707
708 if (fcport->se_sess) {
709 ha->tgt.tgt_ops->shutdown_sess(fcport);
710 ha->tgt.tgt_ops->put_sess(fcport);
711 } else {
712 qlt_unreg_sess(fcport);
713 }
714 }
715
716 /*
717 * Called from qla2x00_reg_remote_port()
718 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* Target mode not configured on this HBA - nothing to do. */
	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		/* Target is being stopped - don't create/revive sessions. */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		/*
		 * No fabric session yet: create one. Session creation needs
		 * tgt_mutex, so drop sess_lock around it and re-take after.
		 */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			/* Login already fully complete - nothing to update. */
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			/* Session is already being torn down - leave it. */
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC \n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		/* Refresh d_id / loop id / conf-completion support. */
		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		/* Port now seen via fabric - promote local to global. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	/* Drop the reference taken above / held by qlt_create_sess(). */
	ha->tgt.tgt_ops->put_sess(sess);
}
786
/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of iocb is undefined.
 */
792 static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host * vha,port_id_t * id,struct imm_ntfy_from_isp * iocb)793 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
794 struct imm_ntfy_from_isp *iocb)
795 {
796 struct qlt_plogi_ack_t *pla;
797
798 lockdep_assert_held(&vha->hw->hardware_lock);
799
800 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
801 if (pla->id.b24 == id->b24) {
802 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
803 "%s %d %8phC Term INOT due to new INOT",
804 __func__, __LINE__,
805 pla->iocb.u.isp24.port_name);
806 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
807 memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
808 return pla;
809 }
810 }
811
812 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
813 if (!pla) {
814 ql_dbg(ql_dbg_async, vha, 0x5088,
815 "qla_target(%d): Allocation of plogi_ack failed\n",
816 vha->vp_idx);
817 return NULL;
818 }
819
820 memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
821 pla->id = *id;
822 list_add_tail(&pla->list, &vha->plogi_ack_list);
823
824 return pla;
825 }
826
/*
 * Drop one reference on a plogi_ack entry. When the last reference goes,
 * update the fcport's loop id / d_id from the stored IOCB, post the
 * matching NACK (PLOGI or PRLI), clear every remaining link to the entry
 * and free it.
 */
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
	struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	/* Still referenced from elsewhere - keep the entry around. */
	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	/* IOCB stores the port id bytes in al_pa/area/domain order. */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	/* Ack with the NACK type matching the ELS that created the entry. */
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	/*
	 * Clear every link still pointing at this entry. Note: fcport is
	 * reused as the loop iterator from here on.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
873
874 void
qlt_plogi_ack_link(struct scsi_qla_host * vha,struct qlt_plogi_ack_t * pla,struct fc_port * sess,enum qlt_plogi_link_t link)875 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
876 struct fc_port *sess, enum qlt_plogi_link_t link)
877 {
878 struct imm_ntfy_from_isp *iocb = &pla->iocb;
879 /* Inc ref_count first because link might already be pointing at pla */
880 pla->ref_count++;
881
882 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
883 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
884 " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
885 sess, link, sess->port_name,
886 iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
887 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
888 pla->ref_count, pla, link);
889
890 if (link == QLT_PLOGI_LINK_CONFLICT) {
891 switch (sess->disc_state) {
892 case DSC_DELETED:
893 case DSC_DELETE_PEND:
894 pla->ref_count--;
895 return;
896 default:
897 break;
898 }
899 }
900
901 if (sess->plogi_link[link])
902 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
903
904 if (link == QLT_PLOGI_LINK_SAME_WWN)
905 pla->fcport = sess;
906
907 sess->plogi_link[link] = pla;
908 }
909
/* Book-keeping for an explicit ELS LOGO we send to a remote port. */
typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;		/* 24-bit port id the LOGO targets */
	/*
	 * Number of commands dropped while we were waiting for the
	 * initiator to ack the LOGO. Initialize to 1 if the LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;	/* entry on vha->logo_list */
} qlt_port_logo_t;
923
/*
 * Send an ELS LOGO to @logo->id unless one is already in flight for that
 * port id, in which case the dropped-command count is folded into the
 * in-flight entry. logo_list membership is protected by tgt_mutex.
 */
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	/* Driver teardown in progress - skip sending anything. */
	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		res = 0;
		goto out;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	/* A LOGO for this port id is already pending: merge and return. */
	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	/* Actually issue the ELS LOGO for this port id. */
	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}
961
/*
 * Workqueue handler (sess->free_work) performing the final, sleepable part
 * of session teardown scheduled by qlt_unreg_sess().  It optionally sends
 * an ELS LOGO/PRLO to the remote port and waits for its completion,
 * releases the target-core session, drops pending PLOGI ACK references,
 * frees the loop id and wakes up waiters on tgt->waitQ / vha->fcport_waitQ.
 * Runs in process context and may sleep (msleep below).
 */
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	/* PLOGI ACK pending for this same WWN, if any */
	struct qlt_plogi_ack_t *own =
	    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
	    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
	    " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
	    __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
	    sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
	    sess->logout_on_delete, sess->keep_nport_handle,
	    sess->send_els_logo);

	/* Fabric-side cleanup is skipped for switch-reserved addresses */
	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			INIT_LIST_HEAD(&logo.list);
			/* only send an explicit LOGO when no PLOGI ACK pends */
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
				sess->logout_completed = 0;
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				/* pending PRLI: tear down process login only */
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}

		/* EDIF (FC-SP) security association teardown */
		if (ha->flags.edif_enabled &&
		    (!own || (own &&
			own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
			sess->edif.authok = 0;
			if (!ha->flags.host_shutting_down) {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
				    __func__, sess->port_name);
				qla2x00_release_all_sadb(vha, sess);
			} else {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s bypassing release_all_sadb\n",
				    __func__);
			}

			qla_edif_clear_appdata(vha, sess);
			qla_edif_sess_down(vha, sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		/* poll for completion of the async LOGO/PRLO posted above */
		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			/*
			 * Driver timeout is set to 22 Sec, update count value to loop
			 * long enough for log-out to complete before advancing. Otherwise,
			 * straddling logout can interfere with re-login attempt.
			 */
			if (cnt > 230)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	/* answer an as-yet-unacknowledged LOGO from the initiator */
	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	/* allow session-management commands to be sent again */
	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	/* unblock a login that was paused while waiting on this session */
	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	/* drop the references held by pending PLOGI ACKs, if any */
	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	/* qlt_unreg_sess() may now schedule a fresh teardown if needed */
	sess->free_pending = 0;

	qla2x00_dfs_remove_rport(vha, sess);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	/* trigger a re-login unless the driver/vport is being torn down */
	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}
1183
1184 /* ha->tgt.sess_lock supposed to be held on entry */
/*
 * Mark @sess for asynchronous teardown and queue qlt_free_session_done().
 * Idempotent: ->free_pending, checked and set under vha->work_lock,
 * guarantees the free work is queued at most once per teardown.
 */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		/* teardown already queued by an earlier caller */
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	/* remember discovery generations so stale events can be ignored */
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
1218
qlt_reset(struct scsi_qla_host * vha,void * iocb,int mcmd)1219 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
1220 {
1221 struct qla_hw_data *ha = vha->hw;
1222 struct fc_port *sess = NULL;
1223 uint16_t loop_id;
1224 int res = 0;
1225 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
1226 unsigned long flags;
1227
1228 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
1229 if (loop_id == 0xFFFF) {
1230 /* Global event */
1231 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
1232 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1233 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
1234 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1235 } else {
1236 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1237 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1238 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1239 }
1240
1241 ql_dbg(ql_dbg_tgt, vha, 0xe000,
1242 "Using sess for qla_tgt_reset: %p\n", sess);
1243 if (!sess) {
1244 res = -ESRCH;
1245 return res;
1246 }
1247
1248 ql_dbg(ql_dbg_tgt, vha, 0xe047,
1249 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
1250 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
1251 mcmd, loop_id);
1252
1253 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
1254 }
1255
qla24xx_chk_fcp_state(struct fc_port * sess)1256 static void qla24xx_chk_fcp_state(struct fc_port *sess)
1257 {
1258 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1259 sess->logout_on_delete = 0;
1260 sess->logo_ack_needed = 0;
1261 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1262 }
1263 }
1264
/*
 * Move @sess towards deletion, honoring its discovery state machine:
 *  - DSC_DELETE_PEND: deletion already scheduled, nothing to do;
 *  - DSC_DELETED: only wake up waiters, unless PLOGI ACKs still pend;
 *  - DSC_UPD_FCPORT: registration still in flight -- record the intent in
 *    next_disc_state and let that path re-enter here when done;
 *  - otherwise claim the deletion and queue sess->del_work.
 */
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		/* warn at most once per 5 s about a slow rport registration */
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	/* claim the deletion under work_lock; bail if someone beat us to it */
	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	/* clear stale firmware login state after a chip reset */
	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_log_warn, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC fc4_type %x\n",
	    sess, sess->port_name, sess->fc4_type);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
1323
qlt_clear_tgt_db(struct qla_tgt * tgt)1324 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1325 {
1326 struct fc_port *sess;
1327 scsi_qla_host_t *vha = tgt->vha;
1328
1329 list_for_each_entry(sess, &vha->vp_fcports, list) {
1330 if (sess->se_sess)
1331 qlt_schedule_sess_for_deletion(sess);
1332 }
1333
1334 /* At this point tgt could be already dead */
1335 }
1336
/*
 * Resolve the firmware loop id for the port address @s_id by fetching the
 * firmware's list of logged-in devices and scanning it.  On success the
 * id is stored in *loop_id and 0 is returned; otherwise -ENOMEM (DMA
 * alloc failure), -EBUSY (mailbox failure) or -ENOENT (no match).
 */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list, *ent;
	dma_addr_t gid_list_dma;
	uint16_t entries;
	int rc, i;
	int res = -ENOENT;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	/* entries are packed with a hardware-specific stride */
	for (i = 0, ent = gid_list; i < entries;
	    i++, ent = (void *)ent + ha->gid_list_info_size) {
		if (ent->al_pa == s_id.al_pa &&
		    ent->area == s_id.area &&
		    ent->domain == s_id.domain) {
			*loop_id = le16_to_cpu(ent->loop_id);
			res = 0;
			break;
		}
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
1383
1384 /*
1385 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
1386 * Caller must put it.
1387 */
/*
 * Create (or reuse) a target session for @fcport.  When a se_sess already
 * exists, only an extra kref is taken and the port is returned.  Otherwise
 * the fabric's initiator ACL check runs, the session kref is initialized
 * and the new session is accounted in qla_tgt->sess_count.  @local marks
 * sessions for local-loop ports.  Returns NULL on failure or while the
 * target is stopping.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		/* session already exists: just add a reference for the caller */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reaquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		/* switch-reserved addresses are not counted as real sessions */
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
1463
1464 /*
1465 * max_gen - specifies maximum session generation
1466 * at which this deletion requestion is still valid
1467 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	/* nothing to do if target mode was never set up on this host */
	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		/* target shutdown already tears down all sessions */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		/* no target-core session was ever created for this port */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	/* drop requests issued against an older session generation */
	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}
1507
test_tgt_sess_count(struct qla_tgt * tgt)1508 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1509 {
1510 struct qla_hw_data *ha = tgt->ha;
1511 unsigned long flags;
1512 int res;
1513 /*
1514 * We need to protect against race, when tgt is freed before or
1515 * inside wake_up()
1516 */
1517 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1518 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1519 "tgt %p, sess_count=%d\n",
1520 tgt, tgt->sess_count);
1521 res = (tgt->sess_count == 0);
1522 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1523
1524 return res;
1525 }
1526
1527 /* Called by tcm_qla2xxx configfs code */
/*
 * Phase 1 of target shutdown: mark the target as stopping, schedule every
 * session for deletion, flush outstanding session works, and wait (up to
 * 10 s, twice) for the session count to drain -- disabling the vha as a
 * "big hammer" between the two waits.  Returns -EPERM when a stop is
 * already in progress or complete.
 */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	/* optrom_mutex serializes against flash/firmware-access operations */
	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	/* re-flush until no new entries appear on the sess-works list */
	do {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_work(&tgt->sess_work);
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	} while (!list_empty(&tgt->sess_works_list));
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
1584
1585 /* Called by tcm_qla2xxx configfs code */
qlt_stop_phase2(struct qla_tgt * tgt)1586 void qlt_stop_phase2(struct qla_tgt *tgt)
1587 {
1588 scsi_qla_host_t *vha = tgt->vha;
1589
1590 if (tgt->tgt_stopped) {
1591 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1592 "Already in tgt->tgt_stopped state\n");
1593 dump_stack();
1594 return;
1595 }
1596 if (!tgt->tgt_stop) {
1597 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1598 "%s: phase1 stop is not completed\n", __func__);
1599 dump_stack();
1600 return;
1601 }
1602
1603 mutex_lock(&tgt->ha->optrom_mutex);
1604 mutex_lock(&vha->vha_tgt.tgt_mutex);
1605 tgt->tgt_stop = 0;
1606 tgt->tgt_stopped = 1;
1607 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1608 mutex_unlock(&tgt->ha->optrom_mutex);
1609
1610 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1611 tgt);
1612
1613 switch (vha->qlini_mode) {
1614 case QLA2XXX_INI_MODE_EXCLUSIVE:
1615 vha->flags.online = 1;
1616 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1617 break;
1618 default:
1619 break;
1620 }
1621 }
1622 EXPORT_SYMBOL(qlt_stop_phase2);
1623
1624 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
/*
 * Final teardown of a qla_tgt: run both stop phases if they have not run
 * yet, detach the per-qpair hints, remove the target from the global
 * list, destroy the lun->qpair btree, notify the fabric module (NPIV
 * vports only) and free the structure.
 */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	/* unlink every qpair hint under its own qpair's lock */
	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	/* empty, then destroy, the lun -> qpair mapping */
	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	/* only NPIV vports notify the fabric module from here */
	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
1674
1675 /* ha->hardware_lock supposed to be held on entry */
qlt_sched_sess_work(struct qla_tgt * tgt,int type,const void * param,unsigned int param_size)1676 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1677 const void *param, unsigned int param_size)
1678 {
1679 struct qla_tgt_sess_work_param *prm;
1680 unsigned long flags;
1681
1682 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1683 if (!prm) {
1684 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1685 "qla_target(%d): Unable to create session "
1686 "work, command will be refused", 0);
1687 return -ENOMEM;
1688 }
1689
1690 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1691 "Scheduling work (type %d, prm %p)"
1692 " to find session for param %p (size %d, tgt %p)\n",
1693 type, prm, param, param_size, tgt);
1694
1695 prm->type = type;
1696 memcpy(&prm->tm_iocb, param, param_size);
1697
1698 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1699 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1700 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1701
1702 schedule_work(&tgt->sess_work);
1703
1704 return 0;
1705 }
1706
1707 /*
1708 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1709 */
/*
 * Build and post a NOTIFY_ACK IOCB answering the immediate-notify @ntfy.
 * Most fields are echoed back from the original notify; the SRR fields
 * are filled from the explicit parameters.  Silently returns if firmware
 * is not started or no request-ring entry can be allocated.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	/* no completion callback for this IOCB */
	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	/* echo back the original notify's identifying fields */
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	/* TODO qualify this with EDIF enable */
	if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
	    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}
1774
/*
 * Build and post an ABTS response IOCB for the task-management command
 * @mcmd, answering the ABTS saved in mcmd->orig_iocb.abts.  A BA_ACC is
 * sent when mcmd->fc_tm_rsp is FCP_TMF_CMPL, otherwise a BA_RJT.  The
 * IOCB handle is tracked in the qpair's outstanding_cmds so the firmware
 * completion can be matched back to @mcmd.  Returns 0 on success or
 * -EAGAIN when no ring entry / handle is available.
 */
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		/*
		 * NOTE(review): the ring entry reserved by
		 * qlt_check_reserve_free_req() above does not appear to be
		 * released on this early return -- confirm whether that is
		 * compensated elsewhere.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	/* F_CTL is a 3-byte field: copy the low three bytes */
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	/* we answer the originator, so swap source and destination ids */
	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		/* accept: BA_ACC payload */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* reject: BA_RJT payload */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}
1859
1860 /*
1861 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1862 */
/*
 * Fire-and-forget variant of the ABTS response: build and post a BA_ACC
 * (@status == FCP_TMF_CMPL) or BA_RJT answering @abts, with no completion
 * tracking (handle QLA_TGT_SKIP_HANDLE).  @ids_reversed indicates the
 * s_id/d_id in @abts are already swapped (i.e. @abts is a response we
 * generated), so they are copied straight through instead of swapped.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	/* no completion callback for this IOCB */
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	/* F_CTL is a 3-byte field: copy the low three bytes */
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		/* answer the originator: swap source and destination ids */
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		/* accept: BA_ACC payload */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* reject: BA_RJT payload */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
1932
1933 /*
1934 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1935 */
/*
 * Retry terminating an exchange: post a CTIO type 7 with the TERMINATE
 * flag for the exchange referenced either by @mcmd (ABTS received from
 * the remote port) or by @pkt (our own earlier ABTS response), then
 * re-issue the corresponding ABTS response.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver. */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		/* task attribute goes into flags bits 9..11 */
		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		/* ids reversed in our own ABTS response, see comment above */
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	/* now re-send the matching ABTS response */
	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);

}
2008
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which the lun reset was received
 * XXX does not go through the lists of other ports (which may have cmds
 * for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	unsigned long flags;

	/* Match on (initiator port ID, LUN); key packs the 24-bit S_ID. */
	key = sid_to_key(s_id);
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	/* ATIOs that arrived before their session existed. */
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key;
		u64 op_lun;

		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
		op_lun = scsilun_to_int(
			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
		if (op_key == key && op_lun == lun)
			/* Only flagged here; the abort is acted on later. */
			op->aborted = true;
	}

	/* Commands already queued for processing. */
	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key;
		u64 cmd_lun;

		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
		cmd_lun = scsilun_to_int(
			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
		if (cmd_key == key && cmd_lun == lun)
			cmd->aborted = 1;
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
2046
/*
 * Map a LUN to its queue-pair hint. Falls back to the base hint
 * (qphints[0]) when queue pairs are unavailable or no mapping exists.
 */
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
    uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *hint = &tgt->qphints[0];

	if (vha->flags.qpairs_available) {
		struct qla_qpair_hint *found;

		found = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
		if (found)
			hint = found;
	}

	return hint;
}
2063
/* Work item: hand a task-management request to the target core in process context. */
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;
	uint32_t tag;
	unsigned long flags;

	/* Only ABTS targets a specific exchange; other TMFs address a LUN. */
	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
		break;
	default:
		tag = 0;
		break;
	}

	/* On success the target core owns mcmd and will complete it later. */
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		/*
		 * Target core refused the TMF: answer the initiator with the
		 * rejection appropriate for the original IOCB type, then free
		 * mcmd ourselves (it was never handed off).
		 */
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			/* These arrived as ATIOs; report BUSY status. */
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			/* These arrived as immediate notifies; just ack. */
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}
2116
2117 /* ha->hardware_lock supposed to be held on entry */
/*
 * Build a management command for a received ABTS and queue it for
 * process-context handling. Returns 0 on success, -ENOMEM/-EIO on failure
 * (caller then sends the rejection). ha->hardware_lock held on entry.
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
	struct qla_tgt_cmd *abort_cmd;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	/* GFP_ATOMIC: we are called with a spinlock held. */
	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	/* Snapshot so a later chip reset invalidates this request. */
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
	    le32_to_cpu(abts->exchange_addr_to_abort));
	if (!abort_cmd) {
		/* Nothing outstanding matches this exchange. */
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EIO;
	}
	mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;

	if (abort_cmd->qpair) {
		/* Run the abort on the same qpair/CPU as the victim command. */
		mcmd->qpair = abort_cmd->qpair;
		mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
		mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
		mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
	}

	/* From here mcmd is owned by the work item. */
	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}
2172
2173 /*
2174 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2175 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
	be_id_t s_id;
	int rc;
	unsigned long flags;

	/* Only whole-exchange aborts are supported, not Abort Sequence. */
	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* The received header is little-endian; session lookup wants BE IDs. */
	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);


	/* A session being torn down cannot service the abort. */
	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}
2243
2244 /*
2245 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2246 */
/* Send the task-management completion status back via a CTIO7 IOCB. */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);


	/* No reservation variant: caller already accounted for ring space. */
	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	/* Bits 9-11 carry the task attribute from the original ATIO. */
	temp = (atio->u.isp24.attr << 9)|
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	/* FCP_RSP carries an 8-byte response-info block; byte 3 is the code. */
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
2293
/* Return a management command to its mempool (exported for tcm_qla2xxx). */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
2299
2300 /*
2301 * ha->hardware_lock supposed to be held on entry. Might drop it, then
2302 * reacquire
2303 */
/*
 * Send a CTIO7 carrying SCSI status plus fixed-format sense data
 * (sense_key/asc/ascq) for the given command, without any data transfer.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t flags;
	uint16_t oxid;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;

	/* Task attribute in bits 9-11, status-mode-1 with status delivery. */
	flags = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(flags);

	oxid = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(oxid);

	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);

	/* No data is moved, so the whole expected transfer is residual. */
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Fixed format sense data. */
	ctio->u.status1.sense_data[0] = 0x70;
	ctio->u.status1.sense_data[2] = sense_key;
	/* Additional sense length */
	ctio->u.status1.sense_data[7] = 0xa;
	/* ASC and ASCQ */
	ctio->u.status1.sense_data[12] = asc;
	ctio->u.status1.sense_data[13] = ascq;

	/* Ensure the IOCB is written before notifying the hardware. */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
2367
2368 /* callback from target fabric module code */
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		/* Response to an immediate notify (ELS), not a TMF ATIO. */
		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
		case ELS_LOGO:
		case ELS_PRLO:
		case ELS_TPRLO:
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %8phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
			break;
		default:
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			qlt_build_abts_resp_iocb(mcmd);
			/* ABTS response completion path frees mcmd later. */
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2435
2436 /* No locks */
/* No locks */
/*
 * DMA-map the command's scatterlists and compute the DSD/request-entry
 * counts needed to describe them. Returns 0 on success, -1 on mapping
 * failure (caller unwinds via qlt_unmap_sg()).
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			(cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			/* HW inserts/strips PI: count per-block segments. */
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not support here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
2496
qlt_unmap_sg(struct scsi_qla_host * vha,struct qla_tgt_cmd * cmd)2497 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2498 {
2499 struct qla_hw_data *ha;
2500 struct qla_qpair *qpair;
2501
2502 if (!cmd->sg_mapped)
2503 return;
2504
2505 qpair = cmd->qpair;
2506
2507 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2508 cmd->dma_data_direction);
2509 cmd->sg_mapped = 0;
2510
2511 if (cmd->prot_sg_cnt)
2512 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2513 cmd->dma_data_direction);
2514
2515 if (!cmd->ctx)
2516 return;
2517 ha = vha->hw;
2518 if (cmd->ctx_dsd_alloced)
2519 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2520
2521 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2522 }
2523
/*
 * Reserve req_cnt entries on the request ring. Re-reads the hardware
 * out-pointer when the cached free count looks too small. Returns 0 on
 * success or -EAGAIN when the ring cannot hold req_cnt (+2 reserve) entries.
 */
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
	uint32_t req_cnt)
{
	struct req_que *req = qpair->req;
	uint32_t out;

	if (req->cnt < (req_cnt + 2)) {
		/* Refresh the consumer index from shadow memory or register. */
		out = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out));

		req->cnt = (req->ring_index < out) ?
		    (out - req->ring_index) :
		    (req->length - (req->ring_index - out));

		if (unlikely(req->cnt < (req_cnt + 2)))
			return -EAGAIN;
	}

	req->cnt -= req_cnt;

	return 0;
}
2547
2548 /*
2549 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2550 */
qlt_get_req_pkt(struct req_que * req)2551 static inline void *qlt_get_req_pkt(struct req_que *req)
2552 {
2553 /* Adjust ring index. */
2554 req->ring_index++;
2555 if (req->ring_index == req->length) {
2556 req->ring_index = 0;
2557 req->ring_ptr = req->ring;
2558 } else {
2559 req->ring_ptr++;
2560 }
2561 return (cont_entry_t *)req->ring_ptr;
2562 }
2563
2564 /* ha->hardware_lock supposed to be held on entry */
qlt_make_handle(struct qla_qpair * qpair)2565 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2566 {
2567 uint32_t h;
2568 int index;
2569 uint8_t found = 0;
2570 struct req_que *req = qpair->req;
2571
2572 h = req->current_outstanding_cmd;
2573
2574 for (index = 1; index < req->num_outstanding_cmds; index++) {
2575 h++;
2576 if (h == req->num_outstanding_cmds)
2577 h = 1;
2578
2579 if (h == QLA_TGT_SKIP_HANDLE)
2580 continue;
2581
2582 if (!req->outstanding_cmds[h]) {
2583 found = 1;
2584 break;
2585 }
2586 }
2587
2588 if (found) {
2589 req->current_outstanding_cmd = h;
2590 } else {
2591 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2592 "qla_target(%d): Ran out of empty cmd slots\n",
2593 qpair->vha->vp_idx);
2594 h = QLA_TGT_NULL_HANDLE;
2595 }
2596
2597 return h;
2598 }
2599
2600 /* ha->hardware_lock supposed to be held on entry */
/* ha->hardware_lock supposed to be held on entry */
/*
 * Build a CTIO7 IOCB on the request ring for prm->cmd. Allocates a
 * completion handle and records the command in outstanding_cmds[].
 * Returns 0 on success, -EAGAIN when no handle is available.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;
	struct qla_tgt_cmd *cmd = prm->cmd;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	/* Bits 9-11 carry the task attribute from the original ATIO. */
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	if (cmd->edif) {
		/* Track per-session encrypted byte counters for EDIF. */
		if (cmd->dma_data_direction == DMA_TO_DEVICE)
			prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
		if (cmd->dma_data_direction == DMA_FROM_DEVICE)
			prm->cmd->sess->edif.tx_bytes += cmd->bufflen;

		pkt->u.status0.edif_flags |= EF_EN_EDIF;
	}

	return 0;
}
2652
2653 /*
2654 * ha->hardware_lock supposed to be held on entry. We have already made sure
2655 * that there is sufficient amount of request entries to not drop it.
2656 */
/* Emit continuation IOCBs carrying the data segments that did not fit
 * in the primary CTIO entry. Consumes prm->sg / prm->seg_cnt. */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			   prm->cmd->qpair->req);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		cur_dsd = cont_pkt64->dsd;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			append_dsd64(&cur_dsd, prm->sg);
			prm->sg = sg_next(prm->sg);
		}
	}
}
2692
2693 /*
2694 * ha->hardware_lock supposed to be held on entry. We have already made sure
2695 * that there is sufficient amount of request entries to not drop it.
2696 */
/* Fill the CTIO7's data-segment descriptors from prm->sg, spilling into
 * continuation IOCBs when more than QLA_TGT_DATASEGS_PER_CMD_24XX remain. */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	struct dsd64 *cur_dsd;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	cur_dsd = &pkt24->u.status0.dsd;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		cur_dsd->address = 0;
		cur_dsd->length = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		append_dsd64(&cur_dsd, prm->sg);
		prm->sg = sg_next(prm->sg);
	}

	/* Any remaining segments go into continuation entries. */
	qlt_load_cont_data_segments(prm);
}
2731
qlt_has_data(struct qla_tgt_cmd * cmd)2732 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2733 {
2734 return cmd->bufflen > 0;
2735 }
2736
qlt_print_dif_err(struct qla_tgt_prm * prm)2737 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2738 {
2739 struct qla_tgt_cmd *cmd;
2740 struct scsi_qla_host *vha;
2741
2742 /* asc 0x10=dif error */
2743 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2744 cmd = prm->cmd;
2745 vha = cmd->vha;
2746 /* ASCQ */
2747 switch (prm->sense_buffer[13]) {
2748 case 1:
2749 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2750 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2751 "se_cmd=%p tag[%x]",
2752 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2753 cmd->atio.u.isp24.exchange_addr);
2754 break;
2755 case 2:
2756 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2757 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2758 "se_cmd=%p tag[%x]",
2759 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2760 cmd->atio.u.isp24.exchange_addr);
2761 break;
2762 case 3:
2763 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2764 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2765 "se_cmd=%p tag[%x]",
2766 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2767 cmd->atio.u.isp24.exchange_addr);
2768 break;
2769 default:
2770 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2771 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2772 "se_cmd=%p tag[%x]",
2773 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2774 cmd->atio.u.isp24.exchange_addr);
2775 break;
2776 }
2777 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2778 }
2779 }
2780
2781 /*
2782 * Called without ha->hardware_lock held
2783 */
/*
 * Called without ha->hardware_lock held
 */
/*
 * Prepare the transmit-parameters block (prm) for xmit_response: DMA-map
 * data if requested, compute residuals, and count the request-ring entries
 * (*full_req_cnt) the response will need. Returns 0 or -EAGAIN when the
 * PCI mapping fails.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	/* Fold target-core residual accounting into the FCP status bits. */
	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag,
		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		       cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				/* Status cannot ride with data; needs its own IOCB. */
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
2847
qlt_need_explicit_conf(struct qla_tgt_cmd * cmd,int sending_sense)2848 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2849 int sending_sense)
2850 {
2851 if (cmd->qpair->enable_class_2)
2852 return 0;
2853
2854 if (sending_sense)
2855 return cmd->conf_compl_supported;
2856 else
2857 return cmd->qpair->enable_explicit_conf &&
2858 cmd->conf_compl_supported;
2859 }
2860
/*
 * Fill status/sense fields of a CTIO7. Uses the status0 view when no sense
 * is carried; switches the IOCB to status-mode-1 (status1 view) when sense
 * data must be embedded. NOTE: status0/status1 are views of the same union.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		/* Carrying sense requires status mode 1. */
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Copy sense as 32-bit words, swapping BE buffer to LE IOCB. */
		for (i = 0; i < prm->sense_buffer_len/4; i++) {
			uint32_t v;

			v = get_unaligned_be32(
					&((uint32_t *)prm->sense_buffer)[i]);
			put_unaligned_le32(v,
				&((uint32_t *)ctio->u.status1.sense_data)[i]);
		}
		qlt_print_dif_err(prm);

	} else {
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
2920
2921 static inline int
qlt_hba_err_chk_enabled(struct se_cmd * se_cmd)2922 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2923 {
2924 switch (se_cmd->prot_op) {
2925 case TARGET_PROT_DOUT_INSERT:
2926 case TARGET_PROT_DIN_STRIP:
2927 if (ql2xenablehba_err_chk >= 1)
2928 return 1;
2929 break;
2930 case TARGET_PROT_DOUT_PASS:
2931 case TARGET_PROT_DIN_PASS:
2932 if (ql2xenablehba_err_chk >= 2)
2933 return 1;
2934 break;
2935 case TARGET_PROT_DIN_INSERT:
2936 case TARGET_PROT_DOUT_STRIP:
2937 return 1;
2938 default:
2939 break;
2940 }
2941 return 0;
2942 }
2943
2944 static inline int
qla_tgt_ref_mask_check(struct se_cmd * se_cmd)2945 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2946 {
2947 switch (se_cmd->prot_op) {
2948 case TARGET_PROT_DIN_INSERT:
2949 case TARGET_PROT_DOUT_INSERT:
2950 case TARGET_PROT_DIN_STRIP:
2951 case TARGET_PROT_DOUT_STRIP:
2952 case TARGET_PROT_DIN_PASS:
2953 case TARGET_PROT_DOUT_PASS:
2954 return 1;
2955 default:
2956 return 0;
2957 }
2958 return 0;
2959 }
2960
/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 *
 * Fills the CRC context's application/reference tag fields and masks, and
 * updates *pfw_prot_opts with the firmware protection options implied by
 * the command's DIF type. Called while building a CTIO CRC2 IOCB.
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	/* Reference tag is the low 32 bits of the task LBA. */
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * Wait till Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	/* Honor escape values (0xffff/0xffffffff) on PI-uninitialized media. */
	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	/* Let the upper layer (TCM glue) contribute tag/option bits. */
	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
		/*
		 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
		 * REF tag, and 16 bit app tag.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			/* Ref tag can't be validated — tell FW to skip it. */
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		/*
		 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
		 * tag has to match LBA in CDB + N
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		if (!qla_tgt_ref_mask_check(se_cmd) ||
		    !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
			*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
			break;
		}
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE3_PROT:
		/* For TYPE 3 protection: 16 bit GUARD only */
		*pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
		break;
	}
}
3049
/*
 * Build a CTIO CRC2 IOCB for a command that carries T10-PI protection
 * information, including the DMA-mapped CRC context and its DSD lists.
 *
 * Returns QLA_SUCCESS, -EAGAIN when no handle is available, or
 * QLA_FUNCTION_FAILED on allocation/SG-walk failure (caller cleans up).
 * Called with the qpair lock held.
 */
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	struct dsd64 *cur_dsd;
	uint32_t transfer_length = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	struct ctio_crc2_to_fw *pkt;
	dma_addr_t crc_ctx_dma;
	uint16_t fw_prot_opts = 0;
	struct qla_tgt_cmd *cmd = prm->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	/* Claim the next slot on the request ring and zero it. */
	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
	    "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
	    cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
	    prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	/* When the HBA inserts/strips PI itself there is no separate
	 * protection SG list to interleave, so bundling is unnecessary.
	 */
	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		/* PI travels on the wire: FC byte count includes it. */
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	/* Map the protection op onto the firmware DIF mode. */
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle  = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	/* Task attribute from the ATIO goes into flags bits 9..12. */
	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
	pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interlaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	/* Parameter bundle handed to the SG-walk helpers below. */
	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, cmd))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}
3257
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_qpair *qpair = cmd->qpair;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	/* Firmware down, chip reset since queuing, or session gone:
	 * silently complete — there is nothing to send the status to.
	 */
	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		return 0;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
	    &cmd->se_cmd, qpair->id);

	/* Map SG lists and work out how many request-ring entries we need. */
	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (xmit_type == QLA_TGT_XMIT_STATUS)
		qpair->tgt_counters.core_qla_snd_status++;
	else
		qpair->tgt_counters.core_qla_que_buf++;

	/* Re-check under the lock: state may have changed while unlocked. */
	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_PROCESSED;
		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		res = 0;
		goto out_unmap_unlock;
	}

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	/* DIF-protected data uses the CRC2 IOCB; plain data uses CTIO7. */
	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
	if (unlikely(res != 0)) {
		/* Give back the reserved ring entries on failure. */
		qpair->req->cnt += full_req_cnt;
		goto out_unmap_unlock;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		/* CRC2 IOCBs carry their own DSDs; only fill for CTIO7. */
		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
			qlt_load_data_segments(&prm);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				if (!cmd->edif)
					pkt->u.status0.residual =
						cpu_to_le32(prm.residual);

				pkt->u.status0.flags |= cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(cmd, 0)) {
					pkt->u.status0.flags |=
					    cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(
				    qpair->req);

			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
			    "Building additional status packet 0x%p.\n",
			    ctio);

			/*
			 * T10Dif: ctio_crc2_to_fw overlay ontop of
			 * ctio7_to_24xx
			 */
			memcpy(ctio, pkt, sizeof(*ctio));
			/* reset back to CTIO7 */
			ctio->entry_count = 1;
			ctio->entry_type = CTIO_TYPE7;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);

			/* qlt_24xx_init_ctio_to_isp will correct
			 * all neccessary fields that's part of CTIO7.
			 * There should be no residual of CTIO-CRC2 data.
			 */
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return 0;

out_unmap_unlock:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
3417
/*
 * Issue a CTIO to request write data (XFER_RDY) from the initiator.
 * Returns 0 on success or a negative errno / build result on failure.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->aborted = 1;
		cmd->write_data_transferred = 0;
		cmd->state = QLA_TGT_STATE_DATA_IN;
		/* Hand the aborted command back to the upper layer. */
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	/* DIF-protected transfers use the CRC2 IOCB variant. */
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		/* Return the reserved ring entries. */
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;
	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
3498
3499
3500 /*
3501 * it is assumed either hardware_lock or qpair lock is held.
3502 */
3503 static void
qlt_handle_dif_error(struct qla_qpair * qpair,struct qla_tgt_cmd * cmd,struct ctio_crc_from_fw * sts)3504 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3505 struct ctio_crc_from_fw *sts)
3506 {
3507 uint8_t *ap = &sts->actual_dif[0];
3508 uint8_t *ep = &sts->expected_dif[0];
3509 uint64_t lba = cmd->se_cmd.t_task_lba;
3510 uint8_t scsi_status, sense_key, asc, ascq;
3511 unsigned long flags;
3512 struct scsi_qla_host *vha = cmd->vha;
3513
3514 cmd->trc_flags |= TRC_DIF_ERR;
3515
3516 cmd->a_guard = get_unaligned_be16(ap + 0);
3517 cmd->a_app_tag = get_unaligned_be16(ap + 2);
3518 cmd->a_ref_tag = get_unaligned_be32(ap + 4);
3519
3520 cmd->e_guard = get_unaligned_be16(ep + 0);
3521 cmd->e_app_tag = get_unaligned_be16(ep + 2);
3522 cmd->e_ref_tag = get_unaligned_be32(ep + 4);
3523
3524 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3525 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3526
3527 scsi_status = sense_key = asc = ascq = 0;
3528
3529 /* check appl tag */
3530 if (cmd->e_app_tag != cmd->a_app_tag) {
3531 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3532 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3533 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3534 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3535 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3536 cmd->atio.u.isp24.fcp_hdr.ox_id);
3537
3538 cmd->dif_err_code = DIF_ERR_APP;
3539 scsi_status = SAM_STAT_CHECK_CONDITION;
3540 sense_key = ABORTED_COMMAND;
3541 asc = 0x10;
3542 ascq = 0x2;
3543 }
3544
3545 /* check ref tag */
3546 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3547 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3548 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3549 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3550 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3551 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3552 cmd->atio.u.isp24.fcp_hdr.ox_id);
3553
3554 cmd->dif_err_code = DIF_ERR_REF;
3555 scsi_status = SAM_STAT_CHECK_CONDITION;
3556 sense_key = ABORTED_COMMAND;
3557 asc = 0x10;
3558 ascq = 0x3;
3559 goto out;
3560 }
3561
3562 /* check guard */
3563 if (cmd->e_guard != cmd->a_guard) {
3564 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3565 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3566 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3567 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3568 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3569 cmd->atio.u.isp24.fcp_hdr.ox_id);
3570
3571 cmd->dif_err_code = DIF_ERR_GRD;
3572 scsi_status = SAM_STAT_CHECK_CONDITION;
3573 sense_key = ABORTED_COMMAND;
3574 asc = 0x10;
3575 ascq = 0x1;
3576 }
3577 out:
3578 switch (cmd->state) {
3579 case QLA_TGT_STATE_NEED_DATA:
3580 /* handle_data will load DIF error code */
3581 cmd->state = QLA_TGT_STATE_DATA_IN;
3582 vha->hw->tgt.tgt_ops->handle_data(cmd);
3583 break;
3584 default:
3585 spin_lock_irqsave(&cmd->cmd_lock, flags);
3586 if (cmd->aborted) {
3587 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3588 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3589 break;
3590 }
3591 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3592
3593 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3594 ascq);
3595 /* assume scsi status gets out on the wire.
3596 * Will not wait for completion.
3597 */
3598 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3599 break;
3600 }
3601 }
3602
/* If hardware_lock held on entry, might drop it, then reaquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
/*
 * Build and queue a NOTIFY ACK IOCB that terminates the exchange described
 * by the given immediate notify. Returns 0 on success, -ENOMEM when no
 * request-ring slot is available.
 */
static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy)
{
	struct nack_to_isp *nack;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
	    "Sending TERM ELS CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe080,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;
	/* No completion expected for this IOCB. */
	pkt->handle = QLA_TGT_SKIP_HANDLE;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	/* Echo the relevant fields of the incoming notify back to FW. */
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}

	/* terminate */
	nack->u.isp24.flags |=
		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);

	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
3653
/*
 * Locked-context wrapper around __qlt_send_term_imm_notif(): the caller is
 * expected to hold the hardware lock (ha_locked must be true).
 */
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *imm, int ha_locked)
{
	int ret;

	/* This path is only valid with the hardware lock already held. */
	WARN_ON_ONCE(!ha_locked);
	ret = __qlt_send_term_imm_notif(vha, imm);
	pr_debug("rc = %d\n", ret);
}
3663
3664 /*
3665 * If hardware_lock held on entry, might drop it, then reaquire
3666 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3667 */
__qlt_send_term_exchange(struct qla_qpair * qpair,struct qla_tgt_cmd * cmd,struct atio_from_isp * atio)3668 static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3669 struct qla_tgt_cmd *cmd,
3670 struct atio_from_isp *atio)
3671 {
3672 struct scsi_qla_host *vha = qpair->vha;
3673 struct ctio7_to_24xx *ctio24;
3674 struct qla_hw_data *ha = vha->hw;
3675 request_t *pkt;
3676 int ret = 0;
3677 uint16_t temp;
3678
3679 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3680
3681 if (cmd)
3682 vha = cmd->vha;
3683
3684 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3685 if (pkt == NULL) {
3686 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3687 "qla_target(%d): %s failed: unable to allocate "
3688 "request packet\n", vha->vp_idx, __func__);
3689 return -ENOMEM;
3690 }
3691
3692 if (cmd != NULL) {
3693 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3694 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3695 "qla_target(%d): Terminating cmd %p with "
3696 "incorrect state %d\n", vha->vp_idx, cmd,
3697 cmd->state);
3698 } else
3699 ret = 1;
3700 }
3701
3702 qpair->tgt_counters.num_term_xchg_sent++;
3703 pkt->entry_count = 1;
3704 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3705
3706 ctio24 = (struct ctio7_to_24xx *)pkt;
3707 ctio24->entry_type = CTIO_TYPE7;
3708 ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
3709 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3710 ctio24->vp_index = vha->vp_idx;
3711 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3712 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3713 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3714 CTIO7_FLAGS_TERMINATE;
3715 ctio24->u.status1.flags = cpu_to_le16(temp);
3716 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3717 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3718
3719 /* Memory Barrier */
3720 wmb();
3721 if (qpair->reqq_start_iocbs)
3722 qpair->reqq_start_iocbs(qpair);
3723 else
3724 qla2x00_start_iocbs(vha, qpair->req);
3725 return ret;
3726 }
3727
/*
 * Terminate the exchange for *atio, taking the qpair lock if the caller
 * does not already hold it (ha_locked). On -ENOMEM a "queue full" command
 * is allocated so the termination can be retried later. Unless ul_abort
 * is set or the command was already aborted, the command is unmapped and
 * freed here.
 */
static void qlt_send_term_exchange(struct qla_qpair *qpair,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
	int ul_abort)
{
	struct scsi_qla_host *vha;
	unsigned long flags = 0;
	int rc;

	/* why use different vha? NPIV */
	if (cmd)
		vha = cmd->vha;
	else
		vha = qpair->vha;

	if (ha_locked) {
		rc = __qlt_send_term_exchange(qpair, cmd, atio);
		if (rc == -ENOMEM)
			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
		goto done;
	}
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	rc = __qlt_send_term_exchange(qpair, cmd, atio);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
	/* Only free here when the upper layer is not driving an abort. */
	if (cmd && !ul_abort && !cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
	}

	if (!ha_locked)
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return;
}
3765
/*
 * Reset exchange-leak accounting: recompute the leak threshold from the
 * current firmware exchange count and drain any queue-full commands that
 * were parked on tgt.q_full_list.
 */
static void qlt_init_term_exchange(struct scsi_qla_host *vha)
{
	struct list_head free_list;
	struct qla_tgt_cmd *cmd, *tcmd;

	/* Threshold is a percentage of the firmware's exchange budget. */
	vha->hw->tgt.leak_exchg_thresh_hold =
	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

	cmd = tcmd = NULL;
	if (!list_empty(&vha->hw->tgt.q_full_list)) {
		INIT_LIST_HEAD(&free_list);
		/* Detach the whole q_full list, then free it off-list. */
		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);

		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
			list_del(&cmd->cmd_list);
			/* This cmd was never sent to TCM. There is no need
			 * to schedule free or call free_cmd
			 */
			qlt_free_cmd(cmd);
			vha->hw->tgt.num_qfull_cmds_alloc--;
		}
	}
	vha->hw->tgt.num_qfull_cmds_dropped = 0;
}
3790
qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host * vha)3791 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3792 {
3793 uint32_t total_leaked;
3794
3795 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3796
3797 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3798 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3799
3800 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3801 "Chip reset due to exchange starvation: %d/%d.\n",
3802 total_leaked, vha->hw->cur_fw_xcb_count);
3803
3804 if (IS_P3P_TYPE(vha->hw))
3805 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3806 else
3807 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3808 qla2xxx_wake_dpc(vha);
3809 }
3810
3811 }
3812
/*
 * Abort a target command by terminating its exchange. Returns -EIO if the
 * command was already aborted (normal for duplicate abort paths), else 0.
 */
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
	    "qla_target(%d): terminating exchange for aborted cmd=%p "
	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
	    se_cmd->tag);

	spin_lock_irqsave(&cmd->cmd_lock, flags);
	if (cmd->aborted) {
		if (cmd->sg_mapped)
			qlt_unmap_sg(vha, cmd);

		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
		/*
		 * It's normal to see 2 calls in this path:
		 *  1) XFER Rdy completion + CMD_T_ABORT
		 *  2) TCM TMR - drain_state_list
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
		    "multiple abort. %p transport_state %x, t_state %x, "
		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
		return -EIO;
	}
	/* Mark aborted under the lock so the check above is race-free. */
	cmd->aborted = 1;
	cmd->trc_flags |= TRC_ABORT;
	spin_unlock_irqrestore(&cmd->cmd_lock, flags);

	/* ul_abort=1: upper layer owns the command's lifetime from here. */
	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
	return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
3850
qlt_free_cmd(struct qla_tgt_cmd * cmd)3851 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3852 {
3853 struct fc_port *sess = cmd->sess;
3854
3855 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3856 "%s: se_cmd[%p] ox_id %04x\n",
3857 __func__, &cmd->se_cmd,
3858 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3859
3860 BUG_ON(cmd->cmd_in_wq);
3861
3862 if (!cmd->q_full)
3863 qlt_decr_num_pend_cmds(cmd->vha);
3864
3865 BUG_ON(cmd->sg_mapped);
3866 cmd->jiffies_at_free = get_jiffies_64();
3867
3868 if (!sess || !sess->se_sess) {
3869 WARN_ON(1);
3870 return;
3871 }
3872 cmd->jiffies_at_free = get_jiffies_64();
3873 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3874 }
3875 EXPORT_SYMBOL(qlt_free_cmd);
3876
3877 /*
3878 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3879 */
qlt_term_ctio_exchange(struct qla_qpair * qpair,void * ctio,struct qla_tgt_cmd * cmd,uint32_t status)3880 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3881 struct qla_tgt_cmd *cmd, uint32_t status)
3882 {
3883 int term = 0;
3884 struct scsi_qla_host *vha = qpair->vha;
3885
3886 if (cmd->se_cmd.prot_op)
3887 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3888 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3889 "se_cmd=%p tag[%x] op %#x/%s",
3890 cmd->lba, cmd->lba,
3891 cmd->num_blks, &cmd->se_cmd,
3892 cmd->atio.u.isp24.exchange_addr,
3893 cmd->se_cmd.prot_op,
3894 prot_op_str(cmd->se_cmd.prot_op));
3895
3896 if (ctio != NULL) {
3897 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3898
3899 term = !(c->flags &
3900 cpu_to_le16(OF_TERM_EXCH));
3901 } else
3902 term = 1;
3903
3904 if (term)
3905 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3906
3907 return term;
3908 }
3909
3910
/* ha->hardware_lock supposed to be held on entry */
/*
 * Translate a CTIO completion handle into the originating command, clearing
 * its outstanding_cmds[] slot. Returns NULL for skip handles, unknown queue
 * IDs, out-of-range or stale handles, or NULL handles on 24xx (unsupported).
 */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, void *ctio)
{
	void *cmd = NULL;
	struct req_que *req;
	int qid = GET_QID(handle);
	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
		return NULL;

	/* Resolve which request queue the handle was issued on. */
	if (qid == rsp->req->id) {
		req = rsp->req;
	} else if (vha->hw->req_q_map[qid]) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
		    vha->vp_idx, rsp->id, handle);
		req = vha->hw->req_q_map[qid];
	} else {
		return NULL;
	}

	h &= QLA_CMD_HANDLE_MASK;

	if (h != QLA_TGT_NULL_HANDLE) {
		if (unlikely(h >= req->num_outstanding_cmds)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe052,
			    "qla_target(%d): Wrong handle %x received\n",
			    vha->vp_idx, handle);
			return NULL;
		}

		cmd = req->outstanding_cmds[h];
		if (unlikely(cmd == NULL)) {
			ql_dbg(ql_dbg_async, vha, 0xe053,
			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
			    vha->vp_idx, handle, req->id, rsp->id);
			return NULL;
		}
		/* Slot is consumed: the command is now owned by the caller. */
		req->outstanding_cmds[h] = NULL;
	} else if (ctio != NULL) {
		/* We can't get loop ID from CTIO7 */
		ql_dbg(ql_dbg_tgt, vha, 0xe054,
		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
		    "support NULL handles\n", vha->vp_idx);
		return NULL;
	}

	return cmd;
}
3962
/*
 * qlt_do_ctio_completion() - process a CTIO completion from the firmware.
 *
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 *
 * Maps @handle back to the owning command, handles error statuses (possibly
 * terminating the exchange), then advances the command state machine: a
 * NEED_DATA command is handed to the upper layer via handle_data(); all other
 * states end in free_cmd().
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	/* Intermediate CTIOs carry no final completion; only log errors. */
	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	/* Look up the command; this also clears its outstanding-cmds slot. */
	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	/* EDIF: check for a pending RX SA deletion on data-out completions. */
	if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
	    cmd->sess) {
		qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
		    (struct ctio7_from_24xx *)ctio);
	}

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_INVALID_RX_ID:
			if (printk_ratelimit())
				dev_info(&vha->hw->pdev->dev,
				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
				    vha->vp_idx, cmd->atio.u.isp24.attr,
				    ((cmd->ctio_flags >> 9) & 0xf),
				    cmd->ctio_flags);

			break;
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			/* DIF handling takes ownership of the command. */
			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}

		case CTIO_FAST_AUTH_ERR:
		case CTIO_FAST_INCOMP_PAD_LEN:
		case CTIO_FAST_INVALID_REQ:
		case CTIO_FAST_SPI_ERR:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again. The exchange is already
		 * cleaned up/freed at FW level. Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			/* Non-zero return means termination deferred the
			 * completion; the command must not be freed here. */
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		/* Upper layer takes over ownership; do not free here. */
		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
4125
/*
 * Translate an FCP task-attribute code from the incoming ATIO into the
 * corresponding TCM task attribute.  Unknown codes fall back to ORDERED.
 */
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
	case ATIO_UNTAGGED:
		/* Untagged commands are treated as SIMPLE. */
		return TCM_SIMPLE_TAG;
	case ATIO_HEAD_OF_QUEUE:
		return TCM_HEAD_TAG;
	case ATIO_ORDERED_QUEUE:
		return TCM_ORDERED_TAG;
	case ATIO_ACA_QUEUE:
		return TCM_ACA_TAG;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		return TCM_ORDERED_TAG;
	}
}
4157
/*
 * Process context for I/O path into tcm_qla2xxx code
 *
 * Runs from the qla_tgt_wq workqueue.  Decodes the ATIO's CDB, data
 * direction and length, and hands the command to the upper layer via
 * tgt_ops->handle_cmd().  On any failure the exchange is terminated and
 * the command resources are released here.  In both paths the extra
 * session reference taken by qlt_handle_cmd_for_atio() is dropped.
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	/* Command may have been flagged aborted while sitting in the wq. */
	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

	/* Derive DMA direction from the FCP read/write bits; both bits set
	 * means a bidirectional command. */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = get_datalen_for_atio(atio);

	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
				          fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	qlt_decr_num_pend_cmds(vha);
	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}
4228
qlt_do_work(struct work_struct * work)4229 static void qlt_do_work(struct work_struct *work)
4230 {
4231 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4232 scsi_qla_host_t *vha = cmd->vha;
4233 unsigned long flags;
4234
4235 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4236 list_del(&cmd->cmd_list);
4237 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4238
4239 __qlt_do_work(cmd);
4240 }
4241
qlt_clr_qp_table(struct scsi_qla_host * vha)4242 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4243 {
4244 unsigned long flags;
4245 struct qla_hw_data *ha = vha->hw;
4246 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4247 void *node;
4248 u64 key = 0;
4249
4250 ql_log(ql_log_info, vha, 0x706c,
4251 "User update Number of Active Qpairs %d\n",
4252 ha->tgt.num_act_qpairs);
4253
4254 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4255
4256 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4257 btree_remove64(&tgt->lun_qpair_map, key);
4258
4259 ha->base_qpair->lun_cnt = 0;
4260 for (key = 0; key < ha->max_qpairs; key++)
4261 if (ha->queue_pair_map[key])
4262 ha->queue_pair_map[key]->lun_cnt = 0;
4263
4264 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4265 }
4266
/*
 * Pick the queue pair (and CPU hint) that will service this command.
 *
 * When multiple qpairs are available, LUNs are mapped to qpairs via
 * tgt->lun_qpair_map.  On a lookup miss a qpair is chosen so that LUNs
 * spread evenly: first an idle qpair (lun_cnt == 0) is preferred, else
 * the qpair with the smallest lun_cnt; the choice is then cached in the
 * btree.  Without qpairs the single default hint qphints[0] is used.
 */
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evently */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			/* Prefer the base qpair if it is still idle. */
			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					/* Insert failed: undo the count; the
					 * chosen hint is still used below. */
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					"Unable to insert lun %llx into lun_qpair_map\n",
					cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			h = NULL;
			/* Scan all qpairs: take the first idle one, otherwise
			 * remember the least-loaded candidate. */
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
							"Unable to insert lun %llx into lun_qpair_map\n",
							cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			/* No idle qpair found: use the least-loaded one. */
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				   "Unable to insert lun %llx into lun_qpair_map\n",
				   cmd->unpacked_lun);
			}
		}
	} else {
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}
4345
/*
 * Allocate a target command for @sess and initialize it from @atio.
 * Returns NULL if the upper layer cannot supply a command structure.
 */
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
				       struct fc_port *sess,
				       struct atio_from_isp *atio)
{
	struct qla_tgt_cmd *cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);

	if (!cmd)
		return NULL;

	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&cmd->sess_cmd_list);
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;

	/* Cache session-derived attributes on the command. */
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;
	cmd->edif = sess->edif.enable;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	/* The LUN must be decoded before a queue pair can be assigned. */
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);

	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;

	return cmd;
}
4379
/* ha->hardware_lock supposed to be held on entry */
/*
 * Accept a new SCSI command ATIO: validate the source, look up the session,
 * take an extra session reference (dropped later in __qlt_do_work()),
 * allocate a command, and queue it to the qla_tgt_wq workqueue on a CPU
 * chosen by qpair/MSI-X availability.
 *
 * Returns 0 on success or a negative errno when the command cannot be
 * accepted (target stopping, reserved address, no/deleted session, or
 * allocation failure).
 */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	/* Ignore frames from well-known/reserved fabric addresses. */
	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		     __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		/* Drop the reference taken above before bailing out. */
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	/* Choose the CPU for the work item: qpair hint when qpairs exist;
	 * with MSI-X, reads run on the current CPU, writes on the hint CPU;
	 * otherwise any CPU. */
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}
4456
/* ha->hardware_lock supposed to be held on entry */
/*
 * Build and queue a task management command (@fn, e.g. LUN reset) for
 * @sess.  For LUN-scoped TMRs the pending commands on that LUN are marked
 * aborted first and the qpair hint is re-resolved from the LUN.  The TMR
 * itself is processed later by qlt_do_tmr_work() on the workqueue.
 *
 * NOTE(review): @iocb is checked for NULL before the memcpy, yet @a
 * (derived from @iocb) is dereferenced unconditionally in the
 * abort_cmds_for_lun() path — presumably callers always pass a non-NULL
 * iocb for LUN-scoped functions; confirm against callers.
 *
 * Returns 0 on success, -ENOMEM if the mempool allocation fails.
 */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	/* Default to the first qpair hint; LUN-scoped TMRs override below. */
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		/* Route the TMR to the qpair that services this LUN. */
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}
4517
4518 /* ha->hardware_lock supposed to be held on entry */
qlt_handle_task_mgmt(struct scsi_qla_host * vha,void * iocb)4519 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4520 {
4521 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4522 struct qla_hw_data *ha = vha->hw;
4523 struct fc_port *sess;
4524 u64 unpacked_lun;
4525 int fn;
4526 unsigned long flags;
4527
4528 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4529
4530 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4531 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4532 a->u.isp24.fcp_hdr.s_id);
4533 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4534
4535 unpacked_lun =
4536 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4537
4538 if (sess == NULL || sess->deleted)
4539 return -EFAULT;
4540
4541 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4542 }
4543
4544 /* ha->hardware_lock supposed to be held on entry */
__qlt_abort_task(struct scsi_qla_host * vha,struct imm_ntfy_from_isp * iocb,struct fc_port * sess)4545 static int __qlt_abort_task(struct scsi_qla_host *vha,
4546 struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4547 {
4548 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4549 struct qla_hw_data *ha = vha->hw;
4550 struct qla_tgt_mgmt_cmd *mcmd;
4551 u64 unpacked_lun;
4552 int rc;
4553
4554 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4555 if (mcmd == NULL) {
4556 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4557 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4558 vha->vp_idx, __func__);
4559 return -ENOMEM;
4560 }
4561 memset(mcmd, 0, sizeof(*mcmd));
4562
4563 mcmd->sess = sess;
4564 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4565 sizeof(mcmd->orig_iocb.imm_ntfy));
4566
4567 unpacked_lun =
4568 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4569 mcmd->reset_count = ha->base_qpair->chip_reset;
4570 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4571 mcmd->qpair = ha->base_qpair;
4572
4573 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4574 le16_to_cpu(iocb->u.isp2x.seq_id));
4575 if (rc != 0) {
4576 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4577 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4578 vha->vp_idx, rc);
4579 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4580 return -EFAULT;
4581 }
4582
4583 return 0;
4584 }
4585
4586 /* ha->hardware_lock supposed to be held on entry */
qlt_abort_task(struct scsi_qla_host * vha,struct imm_ntfy_from_isp * iocb)4587 static int qlt_abort_task(struct scsi_qla_host *vha,
4588 struct imm_ntfy_from_isp *iocb)
4589 {
4590 struct qla_hw_data *ha = vha->hw;
4591 struct fc_port *sess;
4592 int loop_id;
4593 unsigned long flags;
4594
4595 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4596
4597 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4598 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4599 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4600
4601 if (sess == NULL) {
4602 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4603 "qla_target(%d): task abort for unexisting "
4604 "session\n", vha->vp_idx);
4605 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4606 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4607 }
4608
4609 return __qlt_abort_task(vha, iocb, sess);
4610 }
4611
/*
 * Completion callback for an explicit LOGO: log a failure (anything other
 * than MBS_COMMAND_COMPLETE) and mark the fcport's logout as done either way.
 */
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
		    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x s_id %02x:%02x:%02x LOGO failed: %#x\n",
		    __func__, fcport->se_sess, fcport, fcport->port_name,
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}
4629
/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns existing session with matching wwn if present.
 * Null otherwise.
 *
 * A session whose d_id collides but whose loop_id matches is additionally
 * flagged keep_nport_handle and, unless already deleted, reported back via
 * *conflict_sess so the caller can link the PLOGI ACK to it.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
	port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		/* Exact WWN match: this is "our" session — remember it. */
		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}
4702
4703 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
abort_cmds_for_s_id(struct scsi_qla_host * vha,port_id_t * s_id)4704 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4705 {
4706 struct qla_tgt_sess_op *op;
4707 struct qla_tgt_cmd *cmd;
4708 uint32_t key;
4709 int count = 0;
4710 unsigned long flags;
4711
4712 key = (((u32)s_id->b.domain << 16) |
4713 ((u32)s_id->b.area << 8) |
4714 ((u32)s_id->b.al_pa));
4715
4716 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4717 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4718 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4719
4720 if (op_key == key) {
4721 op->aborted = true;
4722 count++;
4723 }
4724 }
4725
4726 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4727 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4728
4729 if (cmd_key == key) {
4730 cmd->aborted = 1;
4731 count++;
4732 }
4733 }
4734 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4735
4736 return count;
4737 }
4738
/*
 * Handle an incoming PLOGI/PRLI immediate notify.
 *
 * Called with hardware_lock held (asserted below).  Marks stale commands
 * from the same s_id aborted, invalidates conflicting sessions, allocates
 * a PLOGI-ACK tracking structure, and either posts creation of a new
 * session or links the ACK to the existing one and schedules its deletion
 * so login can restart cleanly.  Various EDIF and sanity failures cause
 * the notify to be terminated immediately.
 *
 * Returns 1 when the caller should ACK the IOCB at end of processing,
 * 0 when the ACK will be sent asynchronously later.
 */
static int qlt_handle_login(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	/* Port ID arrives little-endian in the IOCB; rebuild it. */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		/* A zero WWN is invalid; terminate the login attempt. */
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	/* EDIF: reject a non-secure PLOGI while the app doorbell is down. */
	if (vha->hw->flags.edif_enabled &&
	    !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
	    iocb->u.isp24.status_subcode == ELS_PLOGI &&
	    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
			"%s %d Term INOT due to app not available lid=%d, NportID %06X ",
			__func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_INACTIVE(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			       "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
			       __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		} else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
			   !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			       "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
			       __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		}
	}

	/* Track this PLOGI so its ACK can be sent once sessions settle. */
	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		/* No existing session for this WWN: post creation work. */
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;
		sess->loop_id = loop_id;
		sess->d_id = port_id;
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;

	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;


	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibily of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to initiator. Initiator should eventually retry
		 *    PLOGI and situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);


		qlt_schedule_sess_for_deletion(sess);
		break;
	}
out:
	return res;
}
4953
4954 /*
4955 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4956 */
/*
 * Handle an ELS (Extended Link Service) immediate notify from the 24xx
 * firmware: PLOGI/PRLI login handshakes, LOGO/PRLO/TPRLO logouts and
 * PDISC/ADISC rediscovery.
 *
 * Called with ha->hardware_lock held (asserted below); takes and releases
 * ha->tgt.sess_lock internally for session lookups/updates.
 *
 * Return: 1 when the caller should send the notify ack now, 0 when the ack
 * will be sent asynchronously later (e.g. once session deletion or a posted
 * nack work item completes).
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	/* 24-bit N_Port ID arrives as three bytes, lowest byte first. */
	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/* res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			/*
			 * With EDIF on, a PRLI from a port that has not
			 * completed security authentication must be
			 * terminated, not accepted.
			 */
			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			/* PLOGI ack still outstanding - force initiator retry. */
			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		/* Well-known/reserved addresses get an immediate ack. */
		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		/*
		 * Another live session claims the same loop id or port id;
		 * unless it is already being torn down, reject this PRLI so
		 * the initiator retries after the conflict resolves.
		 */
		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			/* Same EDIF authentication gate as the N2N path. */
			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorize prli\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				/*
				 * rport registration still in flight; warn
				 * every 5 seconds it stays slow, and make the
				 * initiator retry the PRLI later.
				 */
				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration)/1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration(%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * Impatient initiator sent PRLI before last
				 * PLOGI could finish. Will force him to re-try,
				 * while last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			/* Adopt the identity/attributes carried by this PRLI. */
			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			} else {
				/* No session yet: let DPC rescan the loop. */
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
					SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		/* Global TPRLO means every nexus is lost, not just one. */
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			/* Keep a copy so the LOGO can be acked after TMR. */
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went upper layer, look for qlt_xmit_tm_rsp()
			 * for LOGO_ACK & sess delete
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else logo will be ack */
		}
		break;
	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		/*
		 * A deferred LINK REINIT ack (see qlt_handle_imm_notify) can
		 * now be flushed: the awaited PDISC/ADISC has arrived.
		 */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
				"sess %p lid %d|%d DS %d LS %d\n",
				sess, sess->loop_id, loop_id,
				sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI:	/* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}
5256
5257 /*
5258 * ha->hardware_lock supposed to be held on entry.
5259 * Might drop it, then reacquire.
5260 */
/*
 * Dispatch an IMMEDIATE NOTIFY IOCB from the firmware (LIP reset, link
 * reinit, port logout, global TPRLO/LOGO, abort task, ELS, etc.).
 *
 * Called with ha->hardware_lock held (asserted below).  A notify ack is
 * sent at the end unless a handler took ownership of acking it
 * asynchronously (it then clears send_notify_ack).
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		/* qlt_reset() == 0 means it will ack when done. */
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* Ack a previously stashed REINIT before replacing it. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		/* Log only; the default notify ack is still sent. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		/* ELS subcodes (PLOGI/PRLI/LOGO/...) handled separately. */
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}
5391
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
/*
 * Build and post a status-only CTIO7 carrying @status (typically SAM BUSY /
 * TASK SET FULL) for the exchange described by @atio.
 *
 * Return: 0 on success or if the exchange was terminated because no session
 * exists for the initiator; -ENOMEM when no request-queue IOCB slot is
 * available (caller may queue the command for a later retry).
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		/* Unknown initiator: kill the exchange instead of replying. */
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}
	/* Sending marker isn't necessary, since we called from ISR */

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	/* No completion callback wanted for this handle. */
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id =
		cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
	ctio24->u.status1.scsi_status = cpu_to_le16(status);

	/* No data is moved, so the whole expected length is residual. */
	ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return 0;
}
5463
5464 /*
5465 * This routine is used to allocate a command for either a QFull condition
5466 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5467 * out previously.
5468 */
5469 static void
qlt_alloc_qfull_cmd(struct scsi_qla_host * vha,struct atio_from_isp * atio,uint16_t status,int qfull)5470 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5471 struct atio_from_isp *atio, uint16_t status, int qfull)
5472 {
5473 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5474 struct qla_hw_data *ha = vha->hw;
5475 struct fc_port *sess;
5476 struct qla_tgt_cmd *cmd;
5477 unsigned long flags;
5478
5479 if (unlikely(tgt->tgt_stop)) {
5480 ql_dbg(ql_dbg_io, vha, 0x300a,
5481 "New command while device %p is shutting down\n", tgt);
5482 return;
5483 }
5484
5485 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5486 vha->hw->tgt.num_qfull_cmds_dropped++;
5487 if (vha->hw->tgt.num_qfull_cmds_dropped >
5488 vha->qla_stats.stat_max_qfull_cmds_dropped)
5489 vha->qla_stats.stat_max_qfull_cmds_dropped =
5490 vha->hw->tgt.num_qfull_cmds_dropped;
5491
5492 ql_dbg(ql_dbg_io, vha, 0x3068,
5493 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5494 vha->vp_idx, __func__,
5495 vha->hw->tgt.num_qfull_cmds_dropped);
5496
5497 qlt_chk_exch_leak_thresh_hold(vha);
5498 return;
5499 }
5500
5501 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5502 (vha, atio->u.isp24.fcp_hdr.s_id);
5503 if (!sess)
5504 return;
5505
5506 cmd = ha->tgt.tgt_ops->get_cmd(sess);
5507 if (!cmd) {
5508 ql_dbg(ql_dbg_io, vha, 0x3009,
5509 "qla_target(%d): %s: Allocation of cmd failed\n",
5510 vha->vp_idx, __func__);
5511
5512 vha->hw->tgt.num_qfull_cmds_dropped++;
5513 if (vha->hw->tgt.num_qfull_cmds_dropped >
5514 vha->qla_stats.stat_max_qfull_cmds_dropped)
5515 vha->qla_stats.stat_max_qfull_cmds_dropped =
5516 vha->hw->tgt.num_qfull_cmds_dropped;
5517
5518 qlt_chk_exch_leak_thresh_hold(vha);
5519 return;
5520 }
5521
5522 qlt_incr_num_pend_cmds(vha);
5523 INIT_LIST_HEAD(&cmd->cmd_list);
5524 memcpy(&cmd->atio, atio, sizeof(*atio));
5525
5526 cmd->tgt = vha->vha_tgt.qla_tgt;
5527 cmd->vha = vha;
5528 cmd->reset_count = ha->base_qpair->chip_reset;
5529 cmd->q_full = 1;
5530 cmd->qpair = ha->base_qpair;
5531
5532 if (qfull) {
5533 cmd->q_full = 1;
5534 /* NOTE: borrowing the state field to carry the status */
5535 cmd->state = status;
5536 } else
5537 cmd->term_exchg = 1;
5538
5539 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5540 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5541
5542 vha->hw->tgt.num_qfull_cmds_alloc++;
5543 if (vha->hw->tgt.num_qfull_cmds_alloc >
5544 vha->qla_stats.stat_max_qfull_cmds_alloc)
5545 vha->qla_stats.stat_max_qfull_cmds_alloc =
5546 vha->hw->tgt.num_qfull_cmds_alloc;
5547 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5548 }
5549
/*
 * Drain ha->tgt.q_full_list: for each queued command send either the
 * deferred BUSY status (q_full) or a TERM EXCHANGE (term_exchg), then free
 * the command.  Entries that could not be sent because the request queue is
 * still out of IOCBs (-ENOMEM) are spliced back for a later attempt.
 *
 * Return: 0, or -ENOMEM when the drain stopped early for lack of IOCBs.
 */
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	/* Cheap unlocked peek; re-checked under the lock below. */
	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	/* Take the whole list privately so q_full_lock can be dropped. */
	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		/* Out of IOCB slots: leave the rest on q_full_list. */
		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_move_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	/* Re-queue anything we could not send this round. */
	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}
5624
5625 static void
qlt_send_busy(struct qla_qpair * qpair,struct atio_from_isp * atio,uint16_t status)5626 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5627 uint16_t status)
5628 {
5629 int rc = 0;
5630 struct scsi_qla_host *vha = qpair->vha;
5631
5632 rc = __qlt_send_busy(qpair, atio, status);
5633 if (rc == -ENOMEM)
5634 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5635 }
5636
5637 static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host * vha,struct qla_qpair * qpair,struct atio_from_isp * atio,uint8_t ha_locked)5638 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5639 struct atio_from_isp *atio, uint8_t ha_locked)
5640 {
5641 struct qla_hw_data *ha = vha->hw;
5642 unsigned long flags;
5643
5644 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5645 return 0;
5646
5647 if (!ha_locked)
5648 spin_lock_irqsave(&ha->hardware_lock, flags);
5649 qlt_send_busy(qpair, atio, qla_sam_status);
5650 if (!ha_locked)
5651 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5652
5653 return 1;
5654 }
5655
5656 /* ha->hardware_lock supposed to be held on entry */
5657 /* called via callback from qla2xxx */
/*
 * Entry point for ATIO packets delivered by the firmware: new FCP commands
 * and task management requests (ATIO_TYPE7) plus immediate notifies routed
 * through the ATIO queue.
 *
 * @ha_locked: non-zero when the caller already holds ha->hardware_lock;
 *             error/busy replies below take it themselves otherwise.
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags = 0;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		/* Firmware lost the exchange context; can only reply BUSY. */
		if (unlikely(atio->u.isp24.exchange_addr ==
			     cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		/* No TM flags -> regular SCSI command; else task mgmt. */
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		/* qlt_handle_imm_notify() requires hardware_lock held. */
		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}
5770
5771 /*
5772 * qpair lock is assume to be held
5773 * rc = 0 : send terminate & abts respond
5774 * rc != 0: do not send term & abts respond
5775 */
/*
 * qpair lock is assume to be held
 * rc = 0 : send terminate & abts respond
 * rc != 0: do not send term & abts respond
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect unresolved exchange. If the same ABTS is unable
	 * to terminate an existing command and the same ABTS loops
	 * between FW & Driver, then force FW dump. Under 1 jiff,
	 * we should see multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		/* Same exchange seen 5 times within one jiffy: stuck. */
		if (qpair->retry_term_cnt >= 5) {
			rc = -EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Respond. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha);
			else
				qla2xxx_dump_fw(vha);

			/* Recover by forcing a chip reset via DPC. */
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		/* New jiffy (or new exchange): restart tracking. */
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}
5816
5817
/*
 * Handle an ABTS_RESP_24XX completion from the firmware: match it back to
 * its management command (mcmd) via the IOCB handle, and on failure either
 * retry the terminate-exchange or give up (possibly dumping firmware if the
 * same exchange keeps looping - see qlt_chk_unresolv_exchg()).
 */
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
		(struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
	/* A skip-handle completion legitimately has no mcmd attached. */
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	/* The mcmd may belong to a different (NPIV) host than @vha. */
	if (mcmd)
		vha = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		/* Subcode 0x1E/0 means the exchange may still be retried. */
		if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
		    le32_to_cpu(entry->error_subcode2) == 0) {
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else if (mcmd) {
		ha->tgt.tgt_ops->free_mcmd(mcmd);
	}
}
5864
5865 /* ha->hardware_lock supposed to be held on entry */
5866 /* called via callback from qla2xxx */
/*
 * Dispatch a response-queue packet destined for target mode: CTIO
 * completions, legacy 2xxx ATIOs, immediate notifies, notify acks and
 * ABTS traffic.  Called from the qla2xxx response-queue handler with
 * ha->hardware_lock held.
 */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

		/* Fold entry_status into the upper 16 bits of status. */
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		/* Legacy 2xxx-style incoming command on the response queue. */
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		/* Completion of a notify ack this driver sent earlier. */
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
				cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			qlt_handle_abts_completion(vha, rsp, pkt);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

}
6018
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
qlt_async_event(uint16_t code,struct scsi_qla_host * vha,uint16_t * mailbox)6022 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
6023 uint16_t *mailbox)
6024 {
6025 struct qla_hw_data *ha = vha->hw;
6026 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6027 int login_code;
6028
6029 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
6030 return;
6031
6032 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
6033 IS_QLA2100(ha))
6034 return;
6035 /*
6036 * In tgt_stop mode we also should allow all requests to pass.
6037 * Otherwise, some commands can stuck.
6038 */
6039
6040
6041 switch (code) {
6042 case MBA_RESET: /* Reset */
6043 case MBA_SYSTEM_ERR: /* System Error */
6044 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
6045 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
6046 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
6047 "qla_target(%d): System error async event %#x "
6048 "occurred", vha->vp_idx, code);
6049 break;
6050 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
6051 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6052 break;
6053
6054 case MBA_LOOP_UP:
6055 {
6056 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
6057 "qla_target(%d): Async LOOP_UP occurred "
6058 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
6059 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6060 if (tgt->link_reinit_iocb_pending) {
6061 qlt_send_notify_ack(ha->base_qpair,
6062 &tgt->link_reinit_iocb,
6063 0, 0, 0, 0, 0, 0);
6064 tgt->link_reinit_iocb_pending = 0;
6065 }
6066 break;
6067 }
6068
6069 case MBA_LIP_OCCURRED:
6070 case MBA_LOOP_DOWN:
6071 case MBA_LIP_RESET:
6072 case MBA_RSCN_UPDATE:
6073 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
6074 "qla_target(%d): Async event %#x occurred "
6075 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6076 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6077 break;
6078
6079 case MBA_REJECTED_FCP_CMD:
6080 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
6081 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
6082 vha->vp_idx,
6083 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6084
6085 if (mailbox[3] == 1) {
6086 /* exchange starvation. */
6087 vha->hw->exch_starvation++;
6088 if (vha->hw->exch_starvation > 5) {
6089 ql_log(ql_log_warn, vha, 0xd03a,
6090 "Exchange starvation-. Resetting RISC\n");
6091
6092 vha->hw->exch_starvation = 0;
6093 if (IS_P3P_TYPE(vha->hw))
6094 set_bit(FCOE_CTX_RESET_NEEDED,
6095 &vha->dpc_flags);
6096 else
6097 set_bit(ISP_ABORT_NEEDED,
6098 &vha->dpc_flags);
6099 qla2xxx_wake_dpc(vha);
6100 }
6101 }
6102 break;
6103
6104 case MBA_PORT_UPDATE:
6105 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
6106 "qla_target(%d): Port update async event %#x "
6107 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
6108 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6109 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6110
6111 login_code = mailbox[2];
6112 if (login_code == 0x4) {
6113 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
6114 "Async MB 2: Got PLOGI Complete\n");
6115 vha->hw->exch_starvation = 0;
6116 } else if (login_code == 0x7)
6117 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6118 "Async MB 2: Port Logged Out\n");
6119 break;
6120 default:
6121 break;
6122 }
6123
6124 }
6125
qlt_get_port_database(struct scsi_qla_host * vha,uint16_t loop_id)6126 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6127 uint16_t loop_id)
6128 {
6129 fc_port_t *fcport, *tfcp, *del;
6130 int rc;
6131 unsigned long flags;
6132 u8 newfcport = 0;
6133
6134 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6135 if (!fcport) {
6136 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6137 "qla_target(%d): Allocation of tmp FC port failed",
6138 vha->vp_idx);
6139 return NULL;
6140 }
6141
6142 fcport->loop_id = loop_id;
6143
6144 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6145 if (rc != QLA_SUCCESS) {
6146 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6147 "qla_target(%d): Failed to retrieve fcport "
6148 "information -- get_port_database() returned %x "
6149 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6150 kfree(fcport);
6151 return NULL;
6152 }
6153
6154 del = NULL;
6155 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6156 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6157
6158 if (tfcp) {
6159 tfcp->d_id = fcport->d_id;
6160 tfcp->port_type = fcport->port_type;
6161 tfcp->supported_classes = fcport->supported_classes;
6162 tfcp->flags |= fcport->flags;
6163 tfcp->scan_state = QLA_FCPORT_FOUND;
6164
6165 del = fcport;
6166 fcport = tfcp;
6167 } else {
6168 if (vha->hw->current_topology == ISP_CFG_F)
6169 fcport->flags |= FCF_FABRIC_DEVICE;
6170
6171 list_add_tail(&fcport->list, &vha->vp_fcports);
6172 if (!IS_SW_RESV_ADDR(fcport->d_id))
6173 vha->fcport_count++;
6174 fcport->login_gen++;
6175 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
6176 fcport->login_succ = 1;
6177 newfcport = 1;
6178 }
6179
6180 fcport->deleted = 0;
6181 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6182
6183 switch (vha->host->active_mode) {
6184 case MODE_INITIATOR:
6185 case MODE_DUAL:
6186 if (newfcport) {
6187 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6188 qla24xx_sched_upd_fcport(fcport);
6189 } else {
6190 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6191 "%s %d %8phC post gpsc fcp_cnt %d\n",
6192 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6193 qla24xx_post_gpsc_work(vha, fcport);
6194 }
6195 }
6196 break;
6197
6198 case MODE_TARGET:
6199 default:
6200 break;
6201 }
6202 if (del)
6203 qla2x00_free_fcport(del);
6204
6205 return fcport;
6206 }
6207
/*
 * Must be called under tgt_mutex
 *
 * NOTE(review): the historical comment above looks stale — this function
 * acquires vha->vha_tgt.tgt_mutex itself (see mutex_lock() below), so
 * callers must NOT already hold it; confirm against call sites.
 *
 * Resolve the initiator identified by @s_id to a session: translate
 * S_ID -> loop id, fetch/merge its port database entry, and create a
 * target session for it. The discovery is retried if a global target
 * reset happens concurrently.
 */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	be_id_t s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	/* 0xFFFCxx is the well-known Domain Controller address range. */
	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id.domain, s_id.area, s_id.al_pa);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	/*
	 * Snapshot the reset counter so discovery can be restarted if a
	 * global target reset races with the lookups below.
	 */
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

		/* Unknown initiator: explicitly log it out via LOGO. */
		if (rc == -ENOENT) {
			qlt_port_logo_t logo;

			logo.id = be_to_port_id(s_id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	/* A global reset invalidates what we just discovered: start over. */
	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}
6277
/*
 * Process a deferred ABTS taken off tgt->sess_works_list: look up (or
 * create) the session for the initiator S_ID and hand the ABTS to
 * __qlt_24xx_handle_abts(); on any failure send FCP_TMF_REJECTED back.
 */
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	be_id_t s_id;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/*
		 * Session creation can sleep, so drop sess_lock around it
		 * and retake it before continuing.
		 */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		/* Session being torn down: do not take a reference on it. */
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC \n",
			    __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	/* Drop the reference taken above (creation ref or kref_get). */
	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	/* Reject the ABTS; the response goes out under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
6338
/*
 * Workqueue handler draining tgt->sess_works_list. Each entry is
 * unlinked under sess_work_lock, processed with the lock dropped, and
 * freed after the lock is retaken.
 */
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		default:
			/* No other work types are ever queued. */
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
6376
/* Must be called under tgt_host_action_mutex */
/*
 * Allocate and register the struct qla_tgt for @base_vha: set up per-
 * qpair hints (slot 0 is the base qpair), the LUN->qpair btree, the
 * session work machinery, and link the target onto the global list.
 *
 * Returns 0 on success (including the target-mode-disabled no-op cases),
 * -ENOMEM/-EIO on allocation or btree init failure.
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	/* One hint per qpair, plus slot 0 for the base qpair. */
	tgt->qphints = kcalloc(ha->max_qpairs + 1,
			       sizeof(struct qla_qpair_hint),
			       GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
			"Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	/* Slot 0 always maps to the base qpair. */
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;

		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		/* queue_pair_map may be sparse; only hook up live qpairs. */
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	/* Let the fabric module (tcm_qla2xxx) know about the new target. */
	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}
6473
6474 /* Must be called under tgt_host_action_mutex */
qlt_remove_target(struct qla_hw_data * ha,struct scsi_qla_host * vha)6475 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6476 {
6477 if (!vha->vha_tgt.qla_tgt)
6478 return 0;
6479
6480 if (vha->fc_vport) {
6481 qlt_release(vha->vha_tgt.qla_tgt);
6482 return 0;
6483 }
6484
6485 /* free left over qfull cmds */
6486 qlt_init_term_exchange(vha);
6487
6488 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6489 vha->host_no, ha);
6490 qlt_release(vha->vha_tgt.qla_tgt);
6491
6492 return 0;
6493 }
6494
/* Empty ha->host_map and free the btree's internal storage. */
void qla_remove_hostmap(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	/* _safe variant: entries are removed while iterating. */
	btree_for_each_safe32(&ha->host_map, key, node)
		btree_remove32(&ha->host_map, key);

	btree_destroy32(&ha->host_map);
}
6505
/*
 * Debug helper: render the configfs-supplied WWPN into @b (big-endian,
 * WWN_SIZE bytes) and dump it next to the HW node/port names.
 */
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	put_unaligned_be64(wwpn, b);

	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}
6514
/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback: lport initialization callback for tcm_qla2xxx code
 *
 * Scans the global target list for the host whose port name matches
 * @phys_wwpn and invokes @callback on it while holding qla_tgt_mutex.
 * On success the Scsi_Host reference taken here is kept (released later
 * by qlt_lport_deregister()); on callback failure it is dropped.
 *
 * Return: the callback's return value, or -ENODEV if no matching,
 * target-capable host was found.
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		/* Initiator-only hosts cannot take a target lport. */
		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* Physical-port registration on an already-active target. */
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		/* Not the host we were asked for: drop the ref and keep looking. */
		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
6590
6591 /**
6592 * qlt_lport_deregister - Degister lport
6593 *
6594 * @vha: Registered scsi_qla_host pointer
6595 */
qlt_lport_deregister(struct scsi_qla_host * vha)6596 void qlt_lport_deregister(struct scsi_qla_host *vha)
6597 {
6598 struct qla_hw_data *ha = vha->hw;
6599 struct Scsi_Host *sh = vha->host;
6600 /*
6601 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6602 */
6603 vha->vha_tgt.target_lport_ptr = NULL;
6604 ha->tgt.tgt_ops = NULL;
6605 /*
6606 * Release the Scsi_Host reference for the underlying qla2xxx host
6607 */
6608 scsi_host_put(sh);
6609 }
6610 EXPORT_SYMBOL(qlt_lport_deregister);
6611
6612 /* Must be called under HW lock */
qlt_set_mode(struct scsi_qla_host * vha)6613 void qlt_set_mode(struct scsi_qla_host *vha)
6614 {
6615 switch (vha->qlini_mode) {
6616 case QLA2XXX_INI_MODE_DISABLED:
6617 case QLA2XXX_INI_MODE_EXCLUSIVE:
6618 vha->host->active_mode = MODE_TARGET;
6619 break;
6620 case QLA2XXX_INI_MODE_ENABLED:
6621 vha->host->active_mode = MODE_INITIATOR;
6622 break;
6623 case QLA2XXX_INI_MODE_DUAL:
6624 vha->host->active_mode = MODE_DUAL;
6625 break;
6626 default:
6627 break;
6628 }
6629 }
6630
6631 /* Must be called under HW lock */
qlt_clear_mode(struct scsi_qla_host * vha)6632 static void qlt_clear_mode(struct scsi_qla_host *vha)
6633 {
6634 switch (vha->qlini_mode) {
6635 case QLA2XXX_INI_MODE_DISABLED:
6636 vha->host->active_mode = MODE_UNKNOWN;
6637 break;
6638 case QLA2XXX_INI_MODE_EXCLUSIVE:
6639 vha->host->active_mode = MODE_INITIATOR;
6640 break;
6641 case QLA2XXX_INI_MODE_ENABLED:
6642 case QLA2XXX_INI_MODE_DUAL:
6643 vha->host->active_mode = MODE_INITIATOR;
6644 break;
6645 default:
6646 break;
6647 }
6648 }
6649
/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 *
 * Flip the port into target mode (per qlt_set_mode()) and then bounce
 * it: NPIV ports are disabled/re-enabled, the physical port gets a full
 * ISP abort via the DPC thread.
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	/* Pure initiator configuration: target mode cannot be enabled. */
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	/* Clamp the requested active qpair count to what the HW provides. */
	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		/* NPIV port: bounce just this virtual port. */
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		/* Physical port: full ISP abort through the DPC thread. */
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
			     QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);
6695
/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	/* Drop back to the configured initiator mode under the HW lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Kick the DPC thread to perform the ISP abort. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	/*
	 * We are expecting the offline state.
	 * QLA_FUNCTION_FAILED means that adapter is offline.
	 */
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		       "adapter is offline\n");
}
6730
/*
 * Called from qla_init.c:qla24xx_vport_create() contex to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	/* New vport starts with target mode off. */
	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
6756
6757 u8
qlt_rff_id(struct scsi_qla_host * vha)6758 qlt_rff_id(struct scsi_qla_host *vha)
6759 {
6760 u8 fc4_feature = 0;
6761 /*
6762 * FC-4 Feature bit 0 indicates target functionality to the name server.
6763 */
6764 if (qla_tgt_mode_enabled(vha)) {
6765 fc4_feature = BIT_0;
6766 } else if (qla_ini_mode_enabled(vha)) {
6767 fc4_feature = BIT_1;
6768 } else if (qla_dual_mode_enabled(vha))
6769 fc4_feature = BIT_0 | BIT_1;
6770
6771 return fc4_feature;
6772 }
6773
6774 /*
6775 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6776 * @ha: HA context
6777 *
6778 * Beginning of ATIO ring has initialization control block already built
6779 * by nvram config routine.
6780 *
6781 * Returns 0 on success.
6782 */
6783 void
qlt_init_atio_q_entries(struct scsi_qla_host * vha)6784 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6785 {
6786 struct qla_hw_data *ha = vha->hw;
6787 uint16_t cnt;
6788 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6789
6790 if (qla_ini_mode_enabled(vha))
6791 return;
6792
6793 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6794 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6795 pkt++;
6796 }
6797
6798 }
6799
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: non-zero when the caller already holds the hardware lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	/*
	 * Consume entries until we hit one still carrying the
	 * ATIO_PROCESSED signature (not yet overwritten by firmware).
	 * Corrupted frames are still consumed so the ring can advance.
	 */
	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    pkt->u.isp24.exchange_addr, pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		/*
		 * Advance past all slots of this (possibly multi-entry)
		 * packet, re-stamping each one as processed, and wrapping
		 * the ring index when the end of the ring is reached.
		 */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
6856
/*
 * Reset the ATIO queue in/out pointers and select how ATIO interrupts
 * are delivered (dedicated MSI-X vector vs. INTx/MSI, steered by
 * firmware_options_2 BIT_26) on 83xx/27xx/28xx parts.
 */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	/*
	 * NOTE(review): assumes the ATIO queue uses MSI-X entry 2 — keep
	 * in sync with the interrupt vector layout set up at IRQ request
	 * time.
	 */
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
	/* Read back — presumably to flush the posted writes; verify. */
	rd_reg_dword(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = cpu_to_le16(msix->entry);
			icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for atio que.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}
6889
/*
 * Adjust 24xx NVRAM firmware option words for the requested
 * target/dual mode. The pristine option words are saved once so they
 * can be restored verbatim when target mode is later disabled.
 */
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		/* Pure target gets the maximum exchange count. */
		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		/* Target mode off: restore the option words saved above. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise Class 2 service support when the base qpair allows it. */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
6980
6981 void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host * vha,struct init_cb_24xx * icb)6982 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6983 struct init_cb_24xx *icb)
6984 {
6985 struct qla_hw_data *ha = vha->hw;
6986
6987 if (!QLA_TGT_MODE_ENABLED())
6988 return;
6989
6990 if (ha->tgt.node_name_set) {
6991 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6992 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6993 }
6994 }
6995
/*
 * 81xx counterpart of qlt_24xx_config_nvram_stage1(): adjust NVRAM
 * firmware option words for target/dual mode, saving the pristine
 * values once so they can be restored when target mode is disabled.
 */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		/* Pure target gets the maximum exchange count. */
		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		/* Target mode off: restore the option words saved above. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise Class 2 service support when the base qpair allows it. */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
7083
7084 void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host * vha,struct init_cb_81xx * icb)7085 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7086 struct init_cb_81xx *icb)
7087 {
7088 struct qla_hw_data *ha = vha->hw;
7089
7090 if (!QLA_TGT_MODE_ENABLED())
7091 return;
7092
7093 if (ha->tgt.node_name_set) {
7094 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7095 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7096 }
7097 }
7098
7099 void
qlt_83xx_iospace_config(struct qla_hw_data * ha)7100 qlt_83xx_iospace_config(struct qla_hw_data *ha)
7101 {
7102 if (!QLA_TGT_MODE_ENABLED())
7103 return;
7104
7105 ha->msix_count += 1; /* For ATIO Q */
7106 }
7107
7108
/*
 * Adjust a virtual-port CONFIG entry's option bits according to the
 * current target/initiator mode policy before it is sent to the firmware.
 */
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode. Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested. bit4 = 1 => disable */
	/*
	 * NOTE(review): the comment above states bit4 = 1 disables initiator
	 * mode, yet the code CLEARS BIT_4 here, which by that reading would
	 * re-enable it. Confirm the intended VP-config option semantics
	 * against the firmware interface spec before changing anything.
	 */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
7121
7122 void
qlt_probe_one_stage1(struct scsi_qla_host * base_vha,struct qla_hw_data * ha)7123 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7124 {
7125 mutex_init(&base_vha->vha_tgt.tgt_mutex);
7126 if (!QLA_TGT_MODE_ENABLED())
7127 return;
7128
7129 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
7130 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7131 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7132 } else {
7133 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7134 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7135 }
7136
7137 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7138
7139 INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7140 INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7141 qlt_unknown_atio_work_fn);
7142
7143 qlt_clear_mode(base_vha);
7144
7145 qlt_update_vp_map(base_vha, SET_VP_IDX);
7146 }
7147
7148 irqreturn_t
qla83xx_msix_atio_q(int irq,void * dev_id)7149 qla83xx_msix_atio_q(int irq, void *dev_id)
7150 {
7151 struct rsp_que *rsp;
7152 scsi_qla_host_t *vha;
7153 struct qla_hw_data *ha;
7154 unsigned long flags;
7155
7156 rsp = (struct rsp_que *) dev_id;
7157 ha = rsp->hw;
7158 vha = pci_get_drvdata(ha->pdev);
7159
7160 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7161
7162 qlt_24xx_process_atio_queue(vha, 0);
7163
7164 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7165
7166 return IRQ_HANDLED;
7167 }
7168
7169 static void
qlt_handle_abts_recv_work(struct work_struct * work)7170 qlt_handle_abts_recv_work(struct work_struct *work)
7171 {
7172 struct qla_tgt_sess_op *op = container_of(work,
7173 struct qla_tgt_sess_op, work);
7174 scsi_qla_host_t *vha = op->vha;
7175 struct qla_hw_data *ha = vha->hw;
7176 unsigned long flags;
7177
7178 if (qla2x00_reset_active(vha) ||
7179 (op->chip_reset != ha->base_qpair->chip_reset))
7180 return;
7181
7182 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7183 qlt_24xx_process_atio_queue(vha, 0);
7184 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7185
7186 spin_lock_irqsave(&ha->hardware_lock, flags);
7187 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7188 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7189
7190 kfree(op);
7191 }
7192
7193 void
qlt_handle_abts_recv(struct scsi_qla_host * vha,struct rsp_que * rsp,response_t * pkt)7194 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7195 response_t *pkt)
7196 {
7197 struct qla_tgt_sess_op *op;
7198
7199 op = kzalloc(sizeof(*op), GFP_ATOMIC);
7200
7201 if (!op) {
7202 /* do not reach for ATIO queue here. This is best effort err
7203 * recovery at this point.
7204 */
7205 qlt_response_pkt_all_vps(vha, rsp, pkt);
7206 return;
7207 }
7208
7209 memcpy(&op->atio, pkt, sizeof(*pkt));
7210 op->vha = vha;
7211 op->chip_reset = vha->hw->base_qpair->chip_reset;
7212 op->rsp = rsp;
7213 INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7214 queue_work(qla_tgt_wq, &op->work);
7215 return;
7216 }
7217
7218 int
qlt_mem_alloc(struct qla_hw_data * ha)7219 qlt_mem_alloc(struct qla_hw_data *ha)
7220 {
7221 if (!QLA_TGT_MODE_ENABLED())
7222 return 0;
7223
7224 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7225 sizeof(struct qla_tgt_vp_map),
7226 GFP_KERNEL);
7227 if (!ha->tgt.tgt_vp_map)
7228 return -ENOMEM;
7229
7230 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7231 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7232 &ha->tgt.atio_dma, GFP_KERNEL);
7233 if (!ha->tgt.atio_ring) {
7234 kfree(ha->tgt.tgt_vp_map);
7235 return -ENOMEM;
7236 }
7237 return 0;
7238 }
7239
7240 void
qlt_mem_free(struct qla_hw_data * ha)7241 qlt_mem_free(struct qla_hw_data *ha)
7242 {
7243 if (!QLA_TGT_MODE_ENABLED())
7244 return;
7245
7246 if (ha->tgt.atio_ring) {
7247 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7248 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7249 ha->tgt.atio_dma);
7250 }
7251 ha->tgt.atio_ring = NULL;
7252 ha->tgt.atio_dma = 0;
7253 kfree(ha->tgt.tgt_vp_map);
7254 ha->tgt.tgt_vp_map = NULL;
7255 }
7256
/* vport_slock to be held by the caller */
/*
 * Maintain the two vha lookup structures used by the target path:
 * the per-VP-index array (SET/RESET_VP_IDX) and the 24-bit port-ID
 * btree host_map (SET/RESET_AL_PA).
 */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	/* The 24-bit FC port ID (s_id) is the btree key. */
	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		/* Publish this vha in the per-VP-index lookup table. */
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		/* Insert the port-ID mapping, or update it if present. */
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->host_map, key, vha);
		break;
	case RESET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (slot)
			btree_remove32(&vha->hw->host_map, key);
		/* Forget our port ID; it is no longer registered. */
		vha->d_id.b24 = 0;
		break;
	}
}
7305
/*
 * Track a port-ID change for this vha: drop the old host_map entry (if
 * any) and register the new one. No-op when the ID is unchanged.
 */
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	bool had_id = vha->d_id.b24 != 0;

	if (had_id && vha->d_id.b24 == id.b24)
		return;

	if (had_id)
		qlt_update_vp_map(vha, RESET_AL_PA);

	vha->d_id = id;
	qlt_update_vp_map(vha, SET_AL_PA);
}
7318
qlt_parse_ini_mode(void)7319 static int __init qlt_parse_ini_mode(void)
7320 {
7321 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7322 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7323 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7324 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7325 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7326 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7327 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7328 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7329 else
7330 return false;
7331
7332 return true;
7333 }
7334
/*
 * Module-init setup for the target-mode infrastructure: parse qlini_mode
 * and, when target mode is compiled in, create the slab caches, the
 * management-command mempool and the qla_tgt_wq workqueue.
 *
 * Returns 0 on success, 1 when initiator mode is being disabled
 * (see comment at the bottom), or a negative errno on failure.
 */
int __init qlt_init(void)
{
	int ret;

	/* These structures are part of the firmware interface and must be
	 * exactly 64 bytes. */
	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	/* Pre-reserve 25 objects so TMF handling cannot fail on OOM. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

	/* Error unwind: release resources in reverse order of creation. */
out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
7400
/*
 * Module-exit counterpart of qlt_init(): tear down the target-mode
 * workqueue, mempool and caches in reverse order of creation.
 */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Destroy the workqueue first so no work item can still touch the
	 * mempool or caches released below. */
	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}
7411