1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2017 QLogic Corporation
5 */
6 #include "qla_nvme.h"
7 #include <linux/scatterlist.h>
8 #include <linux/delay.h>
9 #include <linux/nvme.h>
10 #include <linux/nvme-fc.h>
11 #include <linux/blk-mq-pci.h>
12 #include <linux/blk-mq.h>
13
14 static struct nvme_fc_port_template qla_nvme_fc_transport;
15 static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
16 struct qla_qpair *qp,
17 struct qla_nvme_lsrjt_pt_arg *a,
18 bool is_xchg_terminate);
19
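/*
 * Per-LS context for an unsolicited FC-NVMe LS request (PURLS). One is
 * allocated per received LS, linked on fcport->unsol_ctx_head, handed to
 * nvme_fc_rcv_ls_req(), and freed once the LS response (or an exchange
 * terminate) has been posted.
 */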
20 struct qla_nvme_unsol_ctx {
21 struct list_head elem;
22 struct scsi_qla_host *vha;
23 struct fc_port *fcport;
24 struct srb *sp;
25 struct nvmefc_ls_rsp lsrsp;
26 struct nvmefc_ls_rsp *fd_rsp;
27 struct work_struct lsrsp_work;
28 struct work_struct abort_work;
29 __le32 exchange_address;
30 __le16 nport_handle;
31 __le16 ox_id;
32 int comp_status;
33 spinlock_t cmd_lock;
34 };
35
36 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
37 {
38 struct qla_nvme_rport *rport;
39 struct nvme_fc_port_info req;
40 int ret;
41
42 if (!IS_ENABLED(CONFIG_NVME_FC))
43 return 0;
44
45 if (!vha->flags.nvme_enabled) {
46 ql_log(ql_log_info, vha, 0x2100,
47 "%s: Not registering target since Host NVME is not enabled\n",
48 __func__);
49 return 0;
50 }
51
52 if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
53 return 0;
54
55 if (!(fcport->nvme_prli_service_param &
56 (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
57 (fcport->nvme_flag & NVME_FLAG_REGISTERED))
58 return 0;
59
60 fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
61
62 memset(&req, 0, sizeof(struct nvme_fc_port_info));
63 req.port_name = wwn_to_u64(fcport->port_name);
64 req.node_name = wwn_to_u64(fcport->node_name);
65 req.port_role = 0;
66 req.dev_loss_tmo = fcport->dev_loss_tmo;
67
68 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
69 req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
70
71 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
72 req.port_role |= FC_PORT_ROLE_NVME_TARGET;
73
74 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
75 req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
76
77 req.port_id = fcport->d_id.b24;
78
79 ql_log(ql_log_info, vha, 0x2102,
80 "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
81 __func__, req.node_name, req.port_name,
82 req.port_id);
83
84 ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
85 &fcport->nvme_remote_port);
86 if (ret) {
87 ql_log(ql_log_warn, vha, 0x212e,
88 "Failed to register remote port. Transport returned %d\n",
89 ret);
90 return ret;
91 }
92
93 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
94 fcport->dev_loss_tmo);
95
96 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
97 ql_log(ql_log_info, vha, 0x212a,
98 "PortID:%06x Supports SLER\n", req.port_id);
99
100 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
101 ql_log(ql_log_info, vha, 0x212b,
102 "PortID:%06x Supports PI control\n", req.port_id);
103
104 rport = fcport->nvme_remote_port->private;
105 rport->fcport = fcport;
106
107 fcport->nvme_flag |= NVME_FLAG_REGISTERED;
108 return 0;
109 }
110
111 /* Allocate a queue for NVMe traffic */
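/*
 * The NVMe admin queue (qidx 0) and the first I/O queue (qidx 1) both map
 * to hardware queue index 0. If ha->queue_pair_map already holds a qpair
 * for the index it is reused; otherwise a new qpair is created on demand
 * (or the base qpair is used when max_qpairs is 0).
 */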
112 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
113 unsigned int qidx, u16 qsize, void **handle)
114 {
115 struct scsi_qla_host *vha;
116 struct qla_hw_data *ha;
117 struct qla_qpair *qpair;
118
119 /* Map admin queue and 1st IO queue to index 0 */
120 if (qidx)
121 qidx--;
122
123 vha = (struct scsi_qla_host *)lport->private;
124 ha = vha->hw;
125
126 ql_log(ql_log_info, vha, 0x2104,
127 "%s: handle %p, idx =%d, qsize %d\n",
128 __func__, handle, qidx, qsize);
129
130 if (qidx > qla_nvme_fc_transport.max_hw_queues) {
131 ql_log(ql_log_warn, vha, 0x212f,
132 "%s: Illegal qidx=%d. Max=%d\n",
133 __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
134 return -EINVAL;
135 }
136
137 /* Use base qpair if max_qpairs is 0 */
138 if (!ha->max_qpairs) {
139 qpair = ha->base_qpair;
140 } else {
141 if (ha->queue_pair_map[qidx]) {
142 *handle = ha->queue_pair_map[qidx];
143 ql_log(ql_log_info, vha, 0x2121,
144 "Returning existing qpair of %p for idx=%x\n",
145 *handle, qidx);
146 return 0;
147 }
148
149 qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
150 if (!qpair) {
151 ql_log(ql_log_warn, vha, 0x2122,
152 "Failed to allocate qpair\n");
153 return -EINVAL;
154 }
155 qla_adjust_iocb_limit(vha);
156 }
157 *handle = qpair;
158
159 return 0;
160 }
161
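/*
 * Final kref release for an NVMe FCP command: report the completion status
 * back to the FC-NVMe transport via fd->done() and return the srb to the
 * qpair's srb pool.
 */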
162 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
163 {
164 struct srb *sp = container_of(kref, struct srb, cmd_kref);
165 struct nvme_private *priv = (struct nvme_private *)sp->priv;
166 struct nvmefc_fcp_req *fd;
167 struct srb_iocb *nvme;
168 unsigned long flags;
169
170 if (!priv)
171 goto out;
172
173 nvme = &sp->u.iocb_cmd;
174 fd = nvme->u.nvme.desc;
175
176 spin_lock_irqsave(&priv->cmd_lock, flags);
177 priv->sp = NULL;
178 sp->priv = NULL;
179 if (priv->comp_status == QLA_SUCCESS) {
180 fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
181 fd->status = NVME_SC_SUCCESS;
182 } else {
183 fd->rcv_rsplen = 0;
184 fd->transferred_length = 0;
185 fd->status = NVME_SC_INTERNAL;
186 }
187 spin_unlock_irqrestore(&priv->cmd_lock, flags);
188
189 fd->done(fd);
190 out:
191 qla2xxx_rel_qpair_sp(sp->qpair, sp);
192 }
193
194 static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
195 {
196 struct srb *sp = container_of(kref, struct srb, cmd_kref);
197 struct nvme_private *priv = (struct nvme_private *)sp->priv;
198 struct nvmefc_ls_req *fd;
199 unsigned long flags;
200
201 if (!priv)
202 goto out;
203
204 spin_lock_irqsave(&priv->cmd_lock, flags);
205 priv->sp = NULL;
206 sp->priv = NULL;
207 spin_unlock_irqrestore(&priv->cmd_lock, flags);
208
209 fd = priv->fd;
210
211 fd->done(fd, priv->comp_status);
212 out:
213 qla2x00_rel_sp(sp);
214 }
215
216 static void qla_nvme_ls_complete(struct work_struct *work)
217 {
218 struct nvme_private *priv =
219 container_of(work, struct nvme_private, ls_work);
220
221 kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
222 }
223
224 static void qla_nvme_sp_ls_done(srb_t *sp, int res)
225 {
226 struct nvme_private *priv = sp->priv;
227
228 if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
229 return;
230
231 if (res)
232 res = -EINVAL;
233
234 priv->comp_status = res;
235 INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
236 schedule_work(&priv->ls_work);
237 }
238
239 static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
240 {
241 struct srb *sp = container_of(kref, struct srb, cmd_kref);
242 struct qla_nvme_unsol_ctx *uctx = sp->priv;
243 struct nvmefc_ls_rsp *fd_rsp;
244 unsigned long flags;
245
246 if (!uctx) {
247 qla2x00_rel_sp(sp);
248 return;
249 }
250
251 spin_lock_irqsave(&uctx->cmd_lock, flags);
252 uctx->sp = NULL;
253 sp->priv = NULL;
254 spin_unlock_irqrestore(&uctx->cmd_lock, flags);
255
256 fd_rsp = uctx->fd_rsp;
257
258 list_del(&uctx->elem);
259
260 fd_rsp->done(fd_rsp);
261 kfree(uctx);
262 qla2x00_rel_sp(sp);
263 }
264
265 static void qla_nvme_lsrsp_complete(struct work_struct *work)
266 {
267 struct qla_nvme_unsol_ctx *uctx =
268 container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
269
270 kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
271 }
272
273 static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
274 {
275 struct qla_nvme_unsol_ctx *uctx = sp->priv;
276
277 if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
278 return;
279
280 if (res)
281 res = -EINVAL;
282
283 uctx->comp_status = res;
284 INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
285 schedule_work(&uctx->lsrsp_work);
286 }
287
288 /* It is assumed that the qpair lock is held. */
289 static void qla_nvme_sp_done(srb_t *sp, int res)
290 {
291 struct nvme_private *priv = sp->priv;
292
293 priv->comp_status = res;
294 kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
295
296 return;
297 }
298
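/*
 * Deferred abort handler: issues an ABTS for the outstanding command via
 * isp_ops->abort_command(). The extra kref taken before the work was
 * scheduled is dropped here, unless the abort completion path is expected
 * to drop it in qla24xx_abort_sp_done().
 */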
299 static void qla_nvme_abort_work(struct work_struct *work)
300 {
301 struct nvme_private *priv =
302 container_of(work, struct nvme_private, abort_work);
303 srb_t *sp = priv->sp;
304 fc_port_t *fcport = sp->fcport;
305 struct qla_hw_data *ha = fcport->vha->hw;
306 int rval, abts_done_called = 1;
307 bool io_wait_for_abort_done;
308 uint32_t handle;
309
310 ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
311 "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
312 __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
313
314 if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
315 goto out;
316
317 if (ha->flags.host_shutting_down) {
318 ql_log(ql_log_info, sp->fcport->vha, 0xffff,
319 "%s Calling done on sp: %p, type: 0x%x\n",
320 __func__, sp, sp->type);
321 sp->done(sp, 0);
322 goto out;
323 }
324
325 /*
326 * sp may not be valid after abort_command() if the return code is either
327 * SUCCESS or ERR_FROM_FW, so cache the needed values here.
328 */
329 io_wait_for_abort_done = ql2xabts_wait_nvme &&
330 QLA_ABTS_WAIT_ENABLED(sp);
331 handle = sp->handle;
332
333 rval = ha->isp_ops->abort_command(sp);
334
335 ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
336 "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
337 __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
338 sp, handle, fcport, rval);
339
340 /*
341 * If async tmf is enabled, the abort callback is called only on
342 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
343 */
344 if (ql2xasynctmfenable &&
345 rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
346 abts_done_called = 0;
347
348 /*
349 * Return before decreasing the kref so that I/O requests
350 * wait until the ABTS completes. This kref is decreased
351 * in qla24xx_abort_sp_done().
352 */
353 if (abts_done_called && io_wait_for_abort_done)
354 return;
355 out:
356 /* kref_get was done before the work was scheduled. */
357 kref_put(&sp->cmd_kref, sp->put_fn);
358 }
359
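/*
 * Transmit an LS response for a previously received unsolicited LS. On any
 * failure the exchange is terminated with an LS reject/terminate IOCB and
 * the unsolicited context is freed.
 */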
360 static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
361 struct nvme_fc_remote_port *rport,
362 struct nvmefc_ls_rsp *fd_resp)
363 {
364 struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
365 struct qla_nvme_unsol_ctx, lsrsp);
366 struct qla_nvme_rport *qla_rport = rport->private;
367 fc_port_t *fcport = qla_rport->fcport;
368 struct scsi_qla_host *vha = uctx->vha;
369 struct qla_hw_data *ha = vha->hw;
370 struct qla_nvme_lsrjt_pt_arg a;
371 struct srb_iocb *nvme;
372 srb_t *sp;
373 int rval = QLA_FUNCTION_FAILED;
374 uint8_t cnt = 0;
375
376 if (!fcport || fcport->deleted)
377 goto out;
378
379 if (!ha->flags.fw_started)
380 goto out;
381
382 /* Alloc SRB structure */
383 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
384 if (!sp)
385 goto out;
386
387 sp->type = SRB_NVME_LS;
388 sp->name = "nvme_ls";
389 sp->done = qla_nvme_sp_lsrsp_done;
390 sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
391 sp->priv = (void *)uctx;
392 sp->unsol_rsp = 1;
393 uctx->sp = sp;
394 spin_lock_init(&uctx->cmd_lock);
395 nvme = &sp->u.iocb_cmd;
396 uctx->fd_rsp = fd_resp;
397 nvme->u.nvme.desc = fd_resp;
398 nvme->u.nvme.dir = 0;
399 nvme->u.nvme.dl = 0;
400 nvme->u.nvme.timeout_sec = 0;
401 nvme->u.nvme.cmd_dma = fd_resp->rspdma;
402 nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
403 nvme->u.nvme.rsp_len = 0;
404 nvme->u.nvme.rsp_dma = 0;
405 nvme->u.nvme.exchange_address = uctx->exchange_address;
406 nvme->u.nvme.nport_handle = uctx->nport_handle;
407 nvme->u.nvme.ox_id = uctx->ox_id;
408 dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
409 fd_resp->rsplen, DMA_TO_DEVICE);
410
411 ql_dbg(ql_dbg_unsol, vha, 0x2122,
412 "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
413 fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
414 uctx->ox_id, uctx->nport_handle);
415 retry:
416 rval = qla2x00_start_sp(sp);
417 switch (rval) {
418 case QLA_SUCCESS:
419 break;
420 case EAGAIN:
421 msleep(PURLS_MSLEEP_INTERVAL);
422 cnt++;
423 if (cnt < PURLS_RETRY_COUNT)
424 goto retry;
425
426 fallthrough;
427 default:
428 ql_dbg(ql_log_warn, vha, 0x2123,
429 "Failed to xmit Unsol ls response = %d\n", rval);
430 rval = -EIO;
431 qla2x00_rel_sp(sp);
432 goto out;
433 }
434
435 return 0;
436 out:
437 memset((void *)&a, 0, sizeof(a));
438 a.vp_idx = vha->vp_idx;
439 a.nport_handle = uctx->nport_handle;
440 a.xchg_address = uctx->exchange_address;
441 qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
442 kfree(uctx);
443 return rval;
444 }
445
446 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
447 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
448 {
449 struct nvme_private *priv = fd->private;
450 unsigned long flags;
451
452 spin_lock_irqsave(&priv->cmd_lock, flags);
453 if (!priv->sp) {
454 spin_unlock_irqrestore(&priv->cmd_lock, flags);
455 return;
456 }
457
458 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
459 spin_unlock_irqrestore(&priv->cmd_lock, flags);
460 return;
461 }
462 spin_unlock_irqrestore(&priv->cmd_lock, flags);
463
464 INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
465 schedule_work(&priv->abort_work);
466 }
467
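/*
 * Issue an NVMe LS request on behalf of the FC-NVMe transport. The srb kref
 * is initialized here and released through qla_nvme_sp_ls_done() ->
 * qla_nvme_ls_complete() once the LS completes.
 */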
468 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
469 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
470 {
471 struct qla_nvme_rport *qla_rport = rport->private;
472 fc_port_t *fcport = qla_rport->fcport;
473 struct srb_iocb *nvme;
474 struct nvme_private *priv = fd->private;
475 struct scsi_qla_host *vha;
476 int rval = QLA_FUNCTION_FAILED;
477 struct qla_hw_data *ha;
478 srb_t *sp;
479
480 if (!fcport || fcport->deleted)
481 return rval;
482
483 vha = fcport->vha;
484 ha = vha->hw;
485
486 if (!ha->flags.fw_started)
487 return rval;
488
489 /* Alloc SRB structure */
490 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
491 if (!sp)
492 return rval;
493
494 sp->type = SRB_NVME_LS;
495 sp->name = "nvme_ls";
496 sp->done = qla_nvme_sp_ls_done;
497 sp->put_fn = qla_nvme_release_ls_cmd_kref;
498 sp->priv = priv;
499 priv->sp = sp;
500 kref_init(&sp->cmd_kref);
501 spin_lock_init(&priv->cmd_lock);
502 nvme = &sp->u.iocb_cmd;
503 priv->fd = fd;
504 nvme->u.nvme.desc = fd;
505 nvme->u.nvme.dir = 0;
506 nvme->u.nvme.dl = 0;
507 nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
508 nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
509 nvme->u.nvme.rsp_dma = fd->rspdma;
510 nvme->u.nvme.timeout_sec = fd->timeout;
511 nvme->u.nvme.cmd_dma = fd->rqstdma;
512 dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
513 fd->rqstlen, DMA_TO_DEVICE);
514
515 rval = qla2x00_start_sp(sp);
516 if (rval != QLA_SUCCESS) {
517 ql_log(ql_log_warn, vha, 0x700e,
518 "qla2x00_start_sp failed = %d\n", rval);
519 sp->priv = NULL;
520 priv->sp = NULL;
521 qla2x00_rel_sp(sp);
522 return rval;
523 }
524
525 return rval;
526 }
527
528 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
529 struct nvme_fc_remote_port *rport, void *hw_queue_handle,
530 struct nvmefc_fcp_req *fd)
531 {
532 struct nvme_private *priv = fd->private;
533 unsigned long flags;
534
535 spin_lock_irqsave(&priv->cmd_lock, flags);
536 if (!priv->sp) {
537 spin_unlock_irqrestore(&priv->cmd_lock, flags);
538 return;
539 }
540 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
541 spin_unlock_irqrestore(&priv->cmd_lock, flags);
542 return;
543 }
544 spin_unlock_irqrestore(&priv->cmd_lock, flags);
545
546 INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
547 schedule_work(&priv->abort_work);
548 }
549
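/*
 * Build a Command Type NVME IOCB (plus Continuation Type 1 IOCBs for any
 * additional data segments) and post it on the qpair's request ring.
 * Called without locks held; the qpair qp_lock is taken here.
 */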
550 static inline int qla2x00_start_nvme_mq(srb_t *sp)
551 {
552 unsigned long flags;
553 uint32_t *clr_ptr;
554 uint32_t handle;
555 struct cmd_nvme *cmd_pkt;
556 uint16_t cnt, i;
557 uint16_t req_cnt;
558 uint16_t tot_dsds;
559 uint16_t avail_dsds;
560 struct dsd64 *cur_dsd;
561 struct req_que *req = NULL;
562 struct rsp_que *rsp = NULL;
563 struct scsi_qla_host *vha = sp->fcport->vha;
564 struct qla_hw_data *ha = vha->hw;
565 struct qla_qpair *qpair = sp->qpair;
566 struct srb_iocb *nvme = &sp->u.iocb_cmd;
567 struct scatterlist *sgl, *sg;
568 struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
569 struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
570 uint32_t rval = QLA_SUCCESS;
571
572 /* Setup qpair pointers */
573 req = qpair->req;
574 rsp = qpair->rsp;
575 tot_dsds = fd->sg_cnt;
576
577 /* Acquire qpair specific lock */
578 spin_lock_irqsave(&qpair->qp_lock, flags);
579
580 handle = qla2xxx_get_next_handle(req);
581 if (handle == 0) {
582 rval = -EBUSY;
583 goto queuing_error;
584 }
585 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
586
587 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
588 sp->iores.exch_cnt = 1;
589 sp->iores.iocb_cnt = req_cnt;
590 if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
591 rval = -EBUSY;
592 goto queuing_error;
593 }
594
595 if (req->cnt < (req_cnt + 2)) {
596 if (IS_SHADOW_REG_CAPABLE(ha)) {
597 cnt = *req->out_ptr;
598 } else {
599 cnt = rd_reg_dword_relaxed(req->req_q_out);
600 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
601 rval = -EBUSY;
602 goto queuing_error;
603 }
604 }
605
606 if (req->ring_index < cnt)
607 req->cnt = cnt - req->ring_index;
608 else
609 req->cnt = req->length - (req->ring_index - cnt);
610
611 if (req->cnt < (req_cnt + 2)) {
612 rval = -EBUSY;
613 goto queuing_error;
614 }
615 }
616
617 if (unlikely(!fd->sqid)) {
618 if (cmd->sqe.common.opcode == nvme_admin_async_event) {
619 nvme->u.nvme.aen_op = 1;
620 atomic_inc(&ha->nvme_active_aen_cnt);
621 }
622 }
623
624 /* Build command packet. */
625 req->current_outstanding_cmd = handle;
626 req->outstanding_cmds[handle] = sp;
627 sp->handle = handle;
628 req->cnt -= req_cnt;
629
630 cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
631 cmd_pkt->handle = make_handle(req->id, handle);
632
633 /* Zero out remaining portion of packet. */
634 clr_ptr = (uint32_t *)cmd_pkt + 2;
635 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
636
637 cmd_pkt->entry_status = 0;
638
639 /* Update entry type to indicate Command NVME IOCB */
640 cmd_pkt->entry_type = COMMAND_NVME;
641
642 /* No data transfer - how do we check for buffer len == 0? */
643 if (fd->io_dir == NVMEFC_FCP_READ) {
644 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
645 qpair->counters.input_bytes += fd->payload_length;
646 qpair->counters.input_requests++;
647 } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
648 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
649 if ((vha->flags.nvme_first_burst) &&
650 (sp->fcport->nvme_prli_service_param &
651 NVME_PRLI_SP_FIRST_BURST)) {
652 if ((fd->payload_length <=
653 sp->fcport->nvme_first_burst_size) ||
654 (sp->fcport->nvme_first_burst_size == 0))
655 cmd_pkt->control_flags |=
656 cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
657 }
658 qpair->counters.output_bytes += fd->payload_length;
659 qpair->counters.output_requests++;
660 } else if (fd->io_dir == 0) {
661 cmd_pkt->control_flags = 0;
662 }
663
664 if (sp->fcport->edif.enable && fd->io_dir != 0)
665 cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
666
667 /* Set BIT_13 of control flags for Async event */
668 if (vha->flags.nvme2_enabled &&
669 cmd->sqe.common.opcode == nvme_admin_async_event) {
670 cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
671 }
672
673 /* Set NPORT-ID */
674 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
675 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
676 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
677 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
678 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
679
680 /* NVME RSP IU */
681 cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
682 put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
683
684 /* NVME CMND IU */
685 cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
686 cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
687
688 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
689 cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
690
691 /* One DSD is available in the Command Type NVME IOCB */
692 avail_dsds = 1;
693 cur_dsd = &cmd_pkt->nvme_dsd;
694 sgl = fd->first_sgl;
695
696 /* Load data segments */
697 for_each_sg(sgl, sg, tot_dsds, i) {
698 cont_a64_entry_t *cont_pkt;
699
700 /* Allocate additional continuation packets? */
701 if (avail_dsds == 0) {
702 /*
703 * Five DSDs are available in the Continuation
704 * Type 1 IOCB.
705 */
706
707 /* Adjust ring index */
708 req->ring_index++;
709 if (req->ring_index == req->length) {
710 req->ring_index = 0;
711 req->ring_ptr = req->ring;
712 } else {
713 req->ring_ptr++;
714 }
715 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
716 put_unaligned_le32(CONTINUE_A64_TYPE,
717 &cont_pkt->entry_type);
718
719 cur_dsd = cont_pkt->dsd;
720 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
721 }
722
723 append_dsd64(&cur_dsd, sg);
724 avail_dsds--;
725 }
726
727 /* Set total entry count. */
728 cmd_pkt->entry_count = (uint8_t)req_cnt;
729 wmb();
730
731 /* Adjust ring index. */
732 req->ring_index++;
733 if (req->ring_index == req->length) {
734 req->ring_index = 0;
735 req->ring_ptr = req->ring;
736 } else {
737 req->ring_ptr++;
738 }
739
740 /* ignore nvme async cmd due to long timeout */
741 if (!nvme->u.nvme.aen_op)
742 sp->qpair->cmd_cnt++;
743
744 /* Set chip new ring index. */
745 wrt_reg_dword(req->req_q_in, req->ring_index);
746
747 if (vha->flags.process_response_queue &&
748 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
749 qla24xx_process_response_queue(vha, rsp);
750
751 queuing_error:
752 if (rval)
753 qla_put_fw_resources(sp->qpair, &sp->iores);
754 spin_unlock_irqrestore(&qpair->qp_lock, flags);
755
756 return rval;
757 }
758
759 /* Post a command */
760 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
761 struct nvme_fc_remote_port *rport, void *hw_queue_handle,
762 struct nvmefc_fcp_req *fd)
763 {
764 fc_port_t *fcport;
765 struct srb_iocb *nvme;
766 struct scsi_qla_host *vha;
767 struct qla_hw_data *ha;
768 int rval;
769 srb_t *sp;
770 struct qla_qpair *qpair = hw_queue_handle;
771 struct nvme_private *priv = fd->private;
772 struct qla_nvme_rport *qla_rport = rport->private;
773
774 if (!priv) {
775 /* nvme association has been torn down */
776 return -ENODEV;
777 }
778
779 fcport = qla_rport->fcport;
780
781 if (unlikely(!qpair || !fcport || fcport->deleted))
782 return -EBUSY;
783
784 if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
785 return -ENODEV;
786
787 vha = fcport->vha;
788 ha = vha->hw;
789
790 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
791 return -EBUSY;
792
793 /*
794 * If we know the device is going away while the transport is still
795 * sending I/Os, return busy to stall the I/O queue. This happens when
796 * the link goes away and the firmware hasn't notified us yet, but I/Os
797 * are being returned. If the device comes back quickly we won't exhaust
798 * the I/O retry count at the core.
799 */
800 if (fcport->nvme_flag & NVME_FLAG_RESETTING)
801 return -EBUSY;
802
803 qpair = qla_mapq_nvme_select_qpair(ha, qpair);
804
805 /* Alloc SRB structure */
806 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
807 if (!sp)
808 return -EBUSY;
809
810 kref_init(&sp->cmd_kref);
811 spin_lock_init(&priv->cmd_lock);
812 sp->priv = priv;
813 priv->sp = sp;
814 sp->type = SRB_NVME_CMD;
815 sp->name = "nvme_cmd";
816 sp->done = qla_nvme_sp_done;
817 sp->put_fn = qla_nvme_release_fcp_cmd_kref;
818 sp->qpair = qpair;
819 sp->vha = vha;
820 sp->cmd_sp = sp;
821 nvme = &sp->u.iocb_cmd;
822 nvme->u.nvme.desc = fd;
823
824 rval = qla2x00_start_nvme_mq(sp);
825 if (rval != QLA_SUCCESS) {
826 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
827 "qla2x00_start_nvme_mq failed = %d\n", rval);
828 sp->priv = NULL;
829 priv->sp = NULL;
830 qla2xxx_rel_qpair_sp(sp->qpair, sp);
831 }
832
833 return rval;
834 }
835
836 static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
837 struct blk_mq_queue_map *map)
838 {
839 struct scsi_qla_host *vha = lport->private;
840
841 blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
842 }
843
844 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
845 {
846 struct scsi_qla_host *vha = lport->private;
847
848 ql_log(ql_log_info, vha, 0x210f,
849 "localport delete of %p completed.\n", vha->nvme_local_port);
850 vha->nvme_local_port = NULL;
851 complete(&vha->nvme_del_done);
852 }
853
854 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
855 {
856 fc_port_t *fcport;
857 struct qla_nvme_rport *qla_rport = rport->private;
858
859 fcport = qla_rport->fcport;
860 fcport->nvme_remote_port = NULL;
861 fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
862 fcport->nvme_flag &= ~NVME_FLAG_DELETING;
863 ql_log(ql_log_info, fcport->vha, 0x2110,
864 "remoteport_delete of %p %8phN completed.\n",
865 fcport, fcport->port_name);
866 complete(&fcport->nvme_del_done);
867 }
868
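/* FC-NVMe transport template passed to nvme_fc_register_localport() */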
869 static struct nvme_fc_port_template qla_nvme_fc_transport = {
870 .localport_delete = qla_nvme_localport_delete,
871 .remoteport_delete = qla_nvme_remoteport_delete,
872 .create_queue = qla_nvme_alloc_queue,
873 .delete_queue = NULL,
874 .ls_req = qla_nvme_ls_req,
875 .ls_abort = qla_nvme_ls_abort,
876 .fcp_io = qla_nvme_post_cmd,
877 .fcp_abort = qla_nvme_fcp_abort,
878 .xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
879 .map_queues = qla_nvme_map_queues,
880 .max_hw_queues = DEF_NVME_HW_QUEUES,
881 .max_sgl_segments = 1024,
882 .max_dif_sgl_segments = 64,
883 .dma_boundary = 0xFFFFFFFF,
884 .local_priv_sz = 8,
885 .remote_priv_sz = sizeof(struct qla_nvme_rport),
886 .lsrqst_priv_sz = sizeof(struct nvme_private),
887 .fcprqst_priv_sz = sizeof(struct nvme_private),
888 };
889
890 void qla_nvme_unregister_remote_port(struct fc_port *fcport)
891 {
892 int ret;
893
894 if (!IS_ENABLED(CONFIG_NVME_FC))
895 return;
896
897 ql_log(ql_log_warn, fcport->vha, 0x2112,
898 "%s: unregister remoteport on %p %8phN\n",
899 __func__, fcport, fcport->port_name);
900
901 if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
902 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
903
904 init_completion(&fcport->nvme_del_done);
905 ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
906 if (ret)
907 ql_log(ql_log_info, fcport->vha, 0x2114,
908 "%s: Failed to unregister nvme_remote_port (%d)\n",
909 __func__, ret);
910 wait_for_completion(&fcport->nvme_del_done);
911 }
912
913 void qla_nvme_delete(struct scsi_qla_host *vha)
914 {
915 int nv_ret;
916
917 if (!IS_ENABLED(CONFIG_NVME_FC))
918 return;
919
920 if (vha->nvme_local_port) {
921 init_completion(&vha->nvme_del_done);
922 ql_log(ql_log_info, vha, 0x2116,
923 "unregister localport=%p\n",
924 vha->nvme_local_port);
925 nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
926 if (nv_ret)
927 ql_log(ql_log_info, vha, 0x2115,
928 "Unregister of localport failed\n");
929 else
930 wait_for_completion(&vha->nvme_del_done);
931 }
932 }
933
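/*
 * Register this host with the FC-NVMe transport as a local port. The number
 * of hardware queues exposed is ql2xnvme_queues clamped to the qpairs the
 * adapter can actually provide.
 */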
934 int qla_nvme_register_hba(struct scsi_qla_host *vha)
935 {
936 struct nvme_fc_port_template *tmpl;
937 struct qla_hw_data *ha;
938 struct nvme_fc_port_info pinfo;
939 int ret = -EINVAL;
940
941 if (!IS_ENABLED(CONFIG_NVME_FC))
942 return ret;
943
944 ha = vha->hw;
945 tmpl = &qla_nvme_fc_transport;
946
947 if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
948 ql_log(ql_log_warn, vha, 0xfffd,
949 "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
950 ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
951 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
952 } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
953 ql_log(ql_log_warn, vha, 0xfffd,
954 "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
955 ql2xnvme_queues, (ha->max_qpairs - 1),
956 (ha->max_qpairs - 1));
957 ql2xnvme_queues = ha->max_qpairs - 1;
958 }
959
960 qla_nvme_fc_transport.max_hw_queues =
961 min((uint8_t)(ql2xnvme_queues),
962 (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
963
964 ql_log(ql_log_info, vha, 0xfffb,
965 "Number of NVME queues used for this port: %d\n",
966 qla_nvme_fc_transport.max_hw_queues);
967
968 pinfo.node_name = wwn_to_u64(vha->node_name);
969 pinfo.port_name = wwn_to_u64(vha->port_name);
970 pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
971 pinfo.port_id = vha->d_id.b24;
972
973 mutex_lock(&ha->vport_lock);
974 /*
975 * Check again for nvme_local_port to see if any other thread raced
976 * with this one and finished registration.
977 */
978 if (!vha->nvme_local_port) {
979 ql_log(ql_log_info, vha, 0xffff,
980 "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
981 pinfo.node_name, pinfo.port_name, pinfo.port_id);
982 qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
983
984 ret = nvme_fc_register_localport(&pinfo, tmpl,
985 get_device(&ha->pdev->dev),
986 &vha->nvme_local_port);
987 mutex_unlock(&ha->vport_lock);
988 } else {
989 mutex_unlock(&ha->vport_lock);
990 return 0;
991 }
992 if (ret) {
993 ql_log(ql_log_warn, vha, 0xffff,
994 "register_localport failed: ret=%x\n", ret);
995 } else {
996 vha->nvme_local_port->private = vha;
997 }
998
999 return ret;
1000 }
1001
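/*
 * For NVMe aborts that wait on ABTS completion, ask the firmware to use a
 * driver-specified ABTS retry count and response timeout (2 * R_A_TOV).
 */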
1002 void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
1003 {
1004 struct qla_hw_data *ha;
1005
1006 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
1007 return;
1008
1009 ha = orig_sp->fcport->vha->hw;
1010
1011 WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
1012 /* Use Driver Specified Retry Count */
1013 abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
1014 abt->drv.abts_rty_cnt = cpu_to_le16(2);
1015 /* Use specified response timeout */
1016 abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
1017 /* set it to 2 * r_a_tov in secs */
1018 abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
1019 }
1020
1021 void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
1022 {
1023 u16 comp_status;
1024 struct scsi_qla_host *vha;
1025
1026 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
1027 return;
1028
1029 vha = orig_sp->fcport->vha;
1030
1031 comp_status = le16_to_cpu(abt->comp_status);
1032 switch (comp_status) {
1033 case CS_RESET: /* reset event aborted */
1034 case CS_ABORTED: /* IOCB was cleaned */
1035 /* N_Port handle is not currently logged in */
1036 case CS_TIMEOUT:
1037 /* N_Port handle was logged out while waiting for ABTS to complete */
1038 case CS_PORT_UNAVAILABLE:
1039 /* Firmware found that the port name changed */
1040 case CS_PORT_LOGGED_OUT:
1041 /* BA_RJT was received for the ABTS */
1042 case CS_PORT_CONFIG_CHG:
1043 ql_dbg(ql_dbg_async, vha, 0xf09d,
1044 "Abort I/O IOCB completed with error, comp_status=%x\n",
1045 comp_status);
1046 break;
1047
1048 /* BA_RJT was received for the ABTS */
1049 case CS_REJECT_RECEIVED:
1050 ql_dbg(ql_dbg_async, vha, 0xf09e,
1051 "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
1052 abt->fw.ba_rjt_vendorUnique);
1053 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
1054 "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
1055 abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
1056 break;
1057
1058 case CS_COMPLETE:
1059 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
1060 "IOCB request is completed successfully comp_status=%x\n",
1061 comp_status);
1062 break;
1063
1064 case CS_IOCB_ERROR:
1065 ql_dbg(ql_dbg_async, vha, 0xf0a0,
1066 "IOCB request is failed, comp_status=%x\n", comp_status);
1067 break;
1068
1069 default:
1070 ql_dbg(ql_dbg_async, vha, 0xf0a1,
1071 "Invalid Abort IO IOCB Completion Status %x\n",
1072 comp_status);
1073 break;
1074 }
1075 }
1076
1077 inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
1078 {
1079 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
1080 return;
1081 kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
1082 }
1083
1084 static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
1085 u8 explanation, u8 vendor)
1086 {
1087 struct fcnvme_ls_rjt *rjt = buf;
1088
1089 rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
1090 rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
1091 rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1092 rjt->rqst.desc_len =
1093 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1094 rjt->rqst.w0.ls_cmd = ls_cmd;
1095 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1096 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1097 rjt->rjt.reason_code = reason;
1098 rjt->rjt.reason_explanation = explanation;
1099 rjt->rjt.vendor = vendor;
1100 }
1101
1102 static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
1103 struct pt_ls4_request *lsrjt_iocb,
1104 struct qla_nvme_lsrjt_pt_arg *a)
1105 {
1106 lsrjt_iocb->entry_type = PT_LS4_REQUEST;
1107 lsrjt_iocb->entry_count = 1;
1108 lsrjt_iocb->sys_define = 0;
1109 lsrjt_iocb->entry_status = 0;
1110 lsrjt_iocb->handle = QLA_SKIP_HANDLE;
1111 lsrjt_iocb->nport_handle = a->nport_handle;
1112 lsrjt_iocb->exchange_address = a->xchg_address;
1113 lsrjt_iocb->vp_index = a->vp_idx;
1114
1115 lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
1116
1117 put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
1118 lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
1119 lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
1120 lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
1121
1122 put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
1123 lsrjt_iocb->dsd[1].length = 0;
1124 lsrjt_iocb->rx_dseg_count = 0;
1125 lsrjt_iocb->rx_byte_count = 0;
1126 }
1127
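/*
 * Send an LS_RJT for an unsolicited LS, or terminate the exchange when
 * is_xchg_terminate is set, using a PT_LS4_REQUEST pass-through IOCB on the
 * given qpair.
 */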
1128 static int
1129 qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
1130 struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
1131 {
1132 struct pt_ls4_request *lsrjt_iocb;
1133
1134 lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
1135 if (!lsrjt_iocb) {
1136 ql_log(ql_log_warn, vha, 0x210e,
1137 "qla2x00_alloc_iocbs failed.\n");
1138 return QLA_FUNCTION_FAILED;
1139 }
1140
1141 if (!is_xchg_terminate) {
1142 qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
1143 a->reason, a->explanation, 0);
1144
1145 a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
1146 a->tx_addr = vha->hw->lsrjt.cdma;
1147 a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
1148
1149 ql_dbg(ql_dbg_unsol, vha, 0x211f,
1150 "Sending nvme fc ls reject ox_id %04x op %04x\n",
1151 a->ox_id, a->opcode);
1152 ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
1153 vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
1154 } else {
1155 a->tx_byte_count = 0;
1156 a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
1157 ql_dbg(ql_dbg_unsol, vha, 0x2110,
1158 "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
1159 }
1160
1161 qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
1162 /* flush iocb to mem before notifying hw doorbell */
1163 wmb();
1164 qla2x00_start_iocbs(vha, qp->req);
1165 return 0;
1166 }
1167
1168 /*
1169 * qla2xxx_process_purls_pkt() - Pass an unsolicited received FC-NVMe
1170 * Link Service pkt up to nvme_fc_rcv_ls_req().
1171 * The LLDD must provide memory for the response buffer, which is used to
1172 * reference the exchange corresponding to the LS when issuing an LS
1173 * response. The LLDD has to free the response buffer in
1174 * lport->ops->xmt_ls_rsp().
1175 *
1176 * @vha: SCSI qla host
1177 * @item: ptr to purex_item
1178 */
1179 static void
1180 qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
1181 {
1182 struct qla_nvme_unsol_ctx *uctx = item->purls_context;
1183 struct qla_nvme_lsrjt_pt_arg a;
1184 int ret = 1;
1185
1186 #if (IS_ENABLED(CONFIG_NVME_FC))
1187 ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
1188 &item->iocb, item->size);
1189 #endif
1190 if (ret) {
1191 ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
1192 memset((void *)&a, 0, sizeof(a));
1193 a.vp_idx = vha->vp_idx;
1194 a.nport_handle = uctx->nport_handle;
1195 a.xchg_address = uctx->exchange_address;
1196 qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
1197 list_del(&uctx->elem);
1198 kfree(uctx);
1199 }
1200 }
1201
1202 static scsi_qla_host_t *
1203 qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
1204 {
1205 scsi_qla_host_t *base_vha, *vha, *tvp;
1206 unsigned long flags;
1207
1208 base_vha = pci_get_drvdata(ha->pdev);
1209
1210 if (!vp_index && !ha->num_vhosts)
1211 return base_vha;
1212
1213 spin_lock_irqsave(&ha->vport_slock, flags);
1214 list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
1215 if (vha->vp_idx == vp_index) {
1216 spin_unlock_irqrestore(&ha->vport_slock, flags);
1217 return vha;
1218 }
1219 }
1220 spin_unlock_irqrestore(&ha->vport_slock, flags);
1221
1222 return NULL;
1223 }
1224
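/*
 * Handle a PURLS (unsolicited FC-NVMe LS) IOCB from the response queue:
 * locate the originating fcport, stash the exchange details in a
 * qla_nvme_unsol_ctx and queue the packet for qla2xxx_process_purls_pkt().
 * If anything fails, an LS reject is sent and the IOCB is consumed.
 */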
1225 void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
1226 {
1227 struct nvme_fc_remote_port *rport;
1228 struct qla_nvme_rport *qla_rport;
1229 struct qla_nvme_lsrjt_pt_arg a;
1230 struct pt_ls4_rx_unsol *p = *pkt;
1231 struct qla_nvme_unsol_ctx *uctx;
1232 struct rsp_que *rsp_q = *rsp;
1233 struct qla_hw_data *ha;
1234 scsi_qla_host_t *vha;
1235 fc_port_t *fcport = NULL;
1236 struct purex_item *item;
1237 port_id_t d_id = {0};
1238 port_id_t id = {0};
1239 u8 *opcode;
1240 bool xmt_reject = false;
1241
1242 ha = rsp_q->hw;
1243
1244 vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
1245 if (!vha) {
1246 ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
1247 WARN_ON_ONCE(1);
1248 return;
1249 }
1250
1251 memset((void *)&a, 0, sizeof(a));
1252 opcode = (u8 *)&p->payload[0];
1253 a.opcode = opcode[3];
1254 a.vp_idx = p->vp_index;
1255 a.nport_handle = p->nport_handle;
1256 a.ox_id = p->ox_id;
1257 a.xchg_address = p->exchange_address;
1258
1259 id.b.domain = p->s_id.domain;
1260 id.b.area = p->s_id.area;
1261 id.b.al_pa = p->s_id.al_pa;
1262 d_id.b.domain = p->d_id[2];
1263 d_id.b.area = p->d_id[1];
1264 d_id.b.al_pa = p->d_id[0];
1265
1266 fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
1267 if (!fcport) {
1268 ql_dbg(ql_dbg_unsol, vha, 0x211e,
1269 "Failed to find sid=%06x did=%06x\n",
1270 id.b24, d_id.b24);
1271 a.reason = FCNVME_RJT_RC_INV_ASSOC;
1272 a.explanation = FCNVME_RJT_EXP_NONE;
1273 xmt_reject = true;
1274 goto out;
1275 }
1276 rport = fcport->nvme_remote_port;
1277 qla_rport = rport->private;
1278
1279 item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
1280 if (!item) {
1281 a.reason = FCNVME_RJT_RC_LOGIC;
1282 a.explanation = FCNVME_RJT_EXP_NONE;
1283 xmt_reject = true;
1284 goto out;
1285 }
1286
1287 uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
1288 if (!uctx) {
1289 ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n");
1290 a.reason = FCNVME_RJT_RC_LOGIC;
1291 a.explanation = FCNVME_RJT_EXP_NONE;
1292 xmt_reject = true;
1293 kfree(item);
1294 goto out;
1295 }
1296
1297 uctx->vha = vha;
1298 uctx->fcport = fcport;
1299 uctx->exchange_address = p->exchange_address;
1300 uctx->nport_handle = p->nport_handle;
1301 uctx->ox_id = p->ox_id;
1302 qla_rport->uctx = uctx;
1303 INIT_LIST_HEAD(&uctx->elem);
1304 list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
1305 item->purls_context = (void *)uctx;
1306
1307 ql_dbg(ql_dbg_unsol, vha, 0x2121,
1308 "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
1309 item->iocb.iocb[3], item->size, uctx->exchange_address,
1310 fcport->d_id.b24);
1311 /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
1312 * ----- -----------------------------------------------
1313 * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
1314 * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
1315 * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
1316 */
1317 ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
1318 &item->iocb, item->size);
1319
1320 qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
1321 out:
1322 if (xmt_reject) {
1323 qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
1324 __qla_consume_iocb(vha, pkt, rsp);
1325 }
1326 }
1327