// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

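/*
 * qla_nvme_register_remote - register an fcport with the FC-NVMe transport
 * @vha: adapter host
 * @fcport: remote port to register
 *
 * Registers the local port with the transport on first use, then registers
 * the remote port if its PRLI service parameters advertise an NVMe target
 * or discovery controller. Returns 0 on success or when there is nothing
 * to register; otherwise returns the transport error code.
 */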
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = fcport->dev_loss_tmo;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
				       fcport->dev_loss_tmo);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		       "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		       "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use the base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			       "Returning existing qpair of %p for idx=%x\n",
			       *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			       "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}

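/*
 * qla_nvme_release_fcp_cmd_kref - final kref release for an FCP command SRB
 *
 * Detaches the SRB from its nvme_private under cmd_lock, fills in the
 * transport request status from the cached completion status, invokes the
 * transport's done() callback, and returns the SRB to its qpair pool.
 */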
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

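/*
 * qla_nvme_ls_unmap - undo the DMA mapping of an LS request payload
 *
 * Unmaps the request buffer that qla_nvme_ls_req() mapped with
 * dma_map_single() and clears SRB_DMA_VALID so the unmap is not repeated.
 */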
static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
{
	if (sp->flags & SRB_DMA_VALID) {
		struct srb_iocb *nvme = &sp->u.iocb_cmd;
		struct qla_hw_data *ha = sp->fcport->vha->hw;

		dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
				 fd->rqstlen, DMA_TO_DEVICE);
		sp->flags &= ~SRB_DMA_VALID;
	}
}

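/*
 * qla_nvme_release_ls_cmd_kref - final kref release for an LS request SRB
 *
 * Detaches the SRB from its nvme_private under cmd_lock, unmaps the LS
 * payload, completes the request back to the transport with the cached
 * completion status, and frees the SRB.
 */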
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	qla_nvme_ls_unmap(sp, fd);
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

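/*
 * qla_nvme_abort_work - issue an abort for an outstanding SRB
 *
 * Runs from the abort_work item queued by the LS/FCP abort entry points.
 * Skips the abort if the firmware is not running or the session is gone,
 * and drops the kref that was taken before the work was scheduled unless
 * the abort completion path will drop it instead.
 */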
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;
	bool io_wait_for_abort_done;
	uint32_t handle;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	/*
	 * sp may not be valid after abort_command if the return code is
	 * either QLA_SUCCESS or QLA_ERR_FROM_FW, so cache the values here.
	 */
	io_wait_for_abort_done = ql2xabts_wait_nvme &&
					QLA_ABTS_WAIT_ENABLED(sp);
	handle = sp->handle;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, handle, fcport, rval);

	/*
	 * If async tmf is enabled, the abort callback is called only on
	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
	 */
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;

	/*
	 * Return before decreasing the kref so that I/O requests wait
	 * until the ABTS completes. The kref is decreased in
	 * qla24xx_abort_sp_done().
	 */
	if (abts_done_called && io_wait_for_abort_done)
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

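/*
 * qla_nvme_ls_abort - transport entry point to abort an LS request
 *
 * Takes a reference on the in-flight SRB under cmd_lock (bailing out if
 * the command has already completed) and defers the actual abort to
 * qla_nvme_abort_work().
 */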
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

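/*
 * qla_nvme_ls_req - transport entry point to send an NVMe-LS request
 *
 * Allocates an SRB for the LS exchange, maps the request payload for DMA,
 * and hands the IOCB to the firmware via qla2x00_start_sp(). On failure
 * the mapping and SRB are torn down before returning an error to the
 * transport.
 */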
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb   *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int     rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t           *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	if (dma_mapping_error(&ha->pdev->dev, nvme->u.nvme.cmd_dma)) {
		/* Mapping failed; release the SRB without an unmap. */
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	sp->flags |= SRB_DMA_VALID;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla_nvme_ls_unmap(sp, fd);
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

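/*
 * qla_nvme_fcp_abort - transport entry point to abort an FCP command
 *
 * Same scheme as qla_nvme_ls_abort(): grab a reference on the in-flight
 * SRB under cmd_lock and defer the abort to qla_nvme_abort_work().
 */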
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

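/*
 * qla2x00_start_nvme_mq - build and ring a Command Type NVME IOCB
 *
 * Under the qpair lock: reserves a request-queue handle and ring slots,
 * fills in the NVME command IOCB (direction flags, first-burst and EDIF
 * bits, N_Port ID, command/response IU descriptors), appends continuation
 * IOCBs for any extra data segments, then updates the ring index to hand
 * the request to the firmware.
 */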
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t	avail_dsds;
	struct dsd64	*cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	int             rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer - how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
				(sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Ignore nvme async cmd due to long timeout */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the device is going away while the transport is still
	 * sending I/Os, return busy to stall the I/O queue. This happens
	 * when the link goes away and the firmware hasn't notified us yet
	 * but I/Os are being returned. If the device comes back quickly, we
	 * won't exhaust the I/O retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
		struct blk_mq_queue_map *map)
{
	struct scsi_qla_host *vha = lport->private;

	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue	= NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.map_queues	= qla_nvme_map_queues,
	.max_hw_queues  = DEF_NVME_HW_QUEUES,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

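/*
 * qla_nvme_unregister_remote_port - tear down the transport remote port
 *
 * Zeroes the devloss timeout when the driver itself is being removed so
 * the teardown doesn't wait, then unregisters the remote port and blocks
 * until qla_nvme_remoteport_delete() signals completion.
 */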
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
			"unregister localport=%p\n",
			vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

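/*
 * qla_nvme_register_hba - register the HBA as an FC-NVMe local port
 *
 * Clamps the ql2xnvme_queues module parameter to the supported range,
 * sizes max_hw_queues from the available qpairs, and registers the local
 * port with the transport unless another thread already did so (checked
 * under vport_lock). Returns 0 on success or a negative errno.
 */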
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to: %d\n",
		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
		ql_log(ql_log_warn, vha, 0xfffd,
		       "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
		       ql2xnvme_queues, (ha->max_qpairs - 1),
		       (ha->max_qpairs - 1));
		ql2xnvme_queues = ha->max_qpairs - 1;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(ql2xnvme_queues),
		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

	ql_log(ql_log_info, vha, 0xfffb,
	       "Number of NVME queues used for this port: %d\n",
	       qla_nvme_fc_transport.max_hw_queues);

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	mutex_lock(&ha->vport_lock);
	/*
	 * Check again for nvme_local_port to see if any other thread raced
	 * with this one and finished registration.
	 */
	if (!vha->nvme_local_port) {
		ql_log(ql_log_info, vha, 0xffff,
		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

		ret = nvme_fc_register_localport(&pinfo, tmpl,
						 get_device(&ha->pdev->dev),
						 &vha->nvme_local_port);
		mutex_unlock(&ha->vport_lock);
	} else {
		mutex_unlock(&ha->vport_lock);
		return 0;
	}
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

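/*
 * qla_nvme_abort_set_option - tune the Abort IOCB for NVMe aborts
 *
 * When ql2xabts_wait_nvme applies to the original SRB, asks the firmware
 * to use a driver-specified ABTS retry count and a response timeout of
 * 2 * R_A_TOV so the driver can wait for the ABTS to complete.
 */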
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* Set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	u16	comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	case CS_TIMEOUT:
	/* N_Port handle is not currently logged in */
	case CS_PORT_UNAVAILABLE:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_LOGGED_OUT:
	/* Firmware found that the port name changed */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		       "BA_RJT was received for the ABTS, rjt_vendorUnique = %u",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		       "IOCB request is completed successfully, comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		       "IOCB request failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}