1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <linux/blk-cgroup.h>
32 #include <net/checksum.h>
33 
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_eh.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <scsi/scsi_transport_fc.h>
40 
41 #include "lpfc_version.h"
42 #include "lpfc_hw4.h"
43 #include "lpfc_hw.h"
44 #include "lpfc_sli.h"
45 #include "lpfc_sli4.h"
46 #include "lpfc_nl.h"
47 #include "lpfc_disc.h"
48 #include "lpfc.h"
49 #include "lpfc_scsi.h"
50 #include "lpfc_logmsg.h"
51 #include "lpfc_crtn.h"
52 #include "lpfc_vport.h"
53 
54 #define LPFC_RESET_WAIT  2
55 #define LPFC_ABORT_WAIT  2
56 
57 static char *dif_op_str[] = {
58 	"PROT_NORMAL",
59 	"PROT_READ_INSERT",
60 	"PROT_WRITE_STRIP",
61 	"PROT_READ_STRIP",
62 	"PROT_WRITE_INSERT",
63 	"PROT_READ_PASS",
64 	"PROT_WRITE_PASS",
65 };
66 
67 struct scsi_dif_tuple {
68 	__be16 guard_tag;       /* Checksum */
69 	__be16 app_tag;         /* Opaque storage */
70 	__be32 ref_tag;         /* Target LBA or indirect LBA */
71 };
72 
73 static struct lpfc_rport_data *
74 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
75 {
76 	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
77 
78 	if (vport->phba->cfg_fof)
79 		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
80 	else
81 		return (struct lpfc_rport_data *)sdev->hostdata;
82 }
83 
84 static void
85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
86 static void
87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
88 static int
89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
90 
91 /**
92  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
93  * @phba: Pointer to HBA object.
94  * @lpfc_cmd: lpfc scsi command object pointer.
95  *
96  * This function is called from the lpfc_prep_task_mgmt_cmd function to
97  * set the last bit in the response sge entry.
98  **/
99 static void
100 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
101 				struct lpfc_io_buf *lpfc_cmd)
102 {
103 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
104 	if (sgl) {
105 		sgl += 1;
106 		sgl->word2 = le32_to_cpu(sgl->word2);
107 		bf_set(lpfc_sli4_sge_last, sgl, 1);
108 		sgl->word2 = cpu_to_le32(sgl->word2);
109 	}
110 }
111 
112 #define LPFC_INVALID_REFTAG ((u32)-1)
113 
114 /**
115  * lpfc_update_stats - Update statistical data for the command completion
116  * @vport: The virtual port on which this call is executing.
117  * @lpfc_cmd: lpfc scsi command object pointer.
118  *
119  * This function is called on command completion and updates the
120  * statistical data for the completed command.
121  **/
122 static void
123 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
124 {
125 	struct lpfc_hba *phba = vport->phba;
126 	struct lpfc_rport_data *rdata;
127 	struct lpfc_nodelist *pnode;
128 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
129 	unsigned long flags;
130 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
131 	unsigned long latency;
132 	int i;
133 
134 	if (!vport->stat_data_enabled ||
135 	    vport->stat_data_blocked ||
136 	    (cmd->result))
137 		return;
138 
139 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
140 	rdata = lpfc_cmd->rdata;
141 	pnode = rdata->pnode;
142 
143 	spin_lock_irqsave(shost->host_lock, flags);
144 	if (!pnode ||
145 	    !pnode->lat_data ||
146 	    (phba->bucket_type == LPFC_NO_BUCKET)) {
147 		spin_unlock_irqrestore(shost->host_lock, flags);
148 		return;
149 	}
150 
151 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
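		/*
		 * Linear buckets: round (latency - bucket_base) up to the
		 * next bucket_step boundary to pick the bucket index, then
		 * clamp it to the valid range below.
		 */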
152 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
153 			phba->bucket_step;
154 		/* check array subscript bounds */
155 		if (i < 0)
156 			i = 0;
157 		else if (i >= LPFC_MAX_BUCKET_COUNT)
158 			i = LPFC_MAX_BUCKET_COUNT - 1;
159 	} else {
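		/*
		 * Power-of-two buckets: pick the first bucket whose upper
		 * bound (bucket_base + (1 << i) * bucket_step) covers this
		 * latency.
		 */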
160 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
161 			if (latency <= (phba->bucket_base +
162 				((1<<i)*phba->bucket_step)))
163 				break;
164 	}
165 
166 	pnode->lat_data[i].cmd_count++;
167 	spin_unlock_irqrestore(shost->host_lock, flags);
168 }
169 
170 /**
171  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
172  * @phba: The Hba for which this call is being executed.
173  *
174  * This routine is called when there is resource error in driver or firmware.
175  * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. It posts at
176  * most one event per second and wakes up the worker thread of @phba to
177  * process the WORKER_RAMP_DOWN_QUEUE event.
178  *
179  * This routine should be called with no lock held.
180  **/
181 void
182 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
183 {
184 	unsigned long flags;
185 	uint32_t evt_posted;
186 	unsigned long expires;
187 
188 	spin_lock_irqsave(&phba->hbalock, flags);
189 	atomic_inc(&phba->num_rsrc_err);
190 	phba->last_rsrc_error_time = jiffies;
191 
192 	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
193 	if (time_after(expires, jiffies)) {
194 		spin_unlock_irqrestore(&phba->hbalock, flags);
195 		return;
196 	}
197 
198 	phba->last_ramp_down_time = jiffies;
199 
200 	spin_unlock_irqrestore(&phba->hbalock, flags);
201 
202 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
203 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
204 	if (!evt_posted)
205 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
206 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
207 
208 	if (!evt_posted)
209 		lpfc_worker_wake_up(phba);
210 	return;
211 }
212 
213 /**
214  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
215  * @phba: The Hba for which this call is being executed.
216  *
217  * This routine is called by the worker thread to process the
218  * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth for every SCSI
219  * device on each vport associated with @phba.
220  **/
221 void
222 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
223 {
224 	struct lpfc_vport **vports;
225 	struct Scsi_Host  *shost;
226 	struct scsi_device *sdev;
227 	unsigned long new_queue_depth;
228 	unsigned long num_rsrc_err, num_cmd_success;
229 	int i;
230 
231 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
232 	num_cmd_success = atomic_read(&phba->num_cmd_success);
233 
234 	/*
235 	 * The error and success command counters are global per
236 	 * driver instance.  If another handler has already
237 	 * operated on this error event, just exit.
238 	 */
239 	if (num_rsrc_err == 0)
240 		return;
241 
242 	vports = lpfc_create_vport_work_array(phba);
243 	if (vports != NULL)
244 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
245 			shost = lpfc_shost_from_vport(vports[i]);
246 			shost_for_each_device(sdev, shost) {
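				/*
				 * Shrink the queue depth roughly in proportion
				 * to the fraction of commands that hit a
				 * resource error.
				 */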
247 				new_queue_depth =
248 					sdev->queue_depth * num_rsrc_err /
249 					(num_rsrc_err + num_cmd_success);
250 				if (!new_queue_depth)
251 					new_queue_depth = sdev->queue_depth - 1;
252 				else
253 					new_queue_depth = sdev->queue_depth -
254 								new_queue_depth;
255 				scsi_change_queue_depth(sdev, new_queue_depth);
256 			}
257 		}
258 	lpfc_destroy_vport_work_array(phba, vports);
259 	atomic_set(&phba->num_rsrc_err, 0);
260 	atomic_set(&phba->num_cmd_success, 0);
261 }
262 
263 /**
264  * lpfc_scsi_dev_block - set all scsi hosts to block state
265  * @phba: Pointer to HBA context object.
266  *
267  * This function walks the vport list and sets each SCSI host to the blocked
268  * state by invoking the fc_remote_port_delete() routine. It is invoked by
269  * EEH when the device's PCI slot has been permanently disabled.
270  **/
271 void
272 lpfc_scsi_dev_block(struct lpfc_hba *phba)
273 {
274 	struct lpfc_vport **vports;
275 	struct Scsi_Host  *shost;
276 	struct scsi_device *sdev;
277 	struct fc_rport *rport;
278 	int i;
279 
280 	vports = lpfc_create_vport_work_array(phba);
281 	if (vports != NULL)
282 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
283 			shost = lpfc_shost_from_vport(vports[i]);
284 			shost_for_each_device(sdev, shost) {
285 				rport = starget_to_rport(scsi_target(sdev));
286 				fc_remote_port_delete(rport);
287 			}
288 		}
289 	lpfc_destroy_vport_work_array(phba, vports);
290 }
291 
292 /**
293  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
294  * @vport: The virtual port for which this call is being executed.
295  * @num_to_alloc: The requested number of buffers to allocate.
296  *
297  * This routine allocates a scsi buffer for a device with the SLI-3 interface
298  * spec. The scsi buffer contains all the information needed to initiate a
299  * SCSI I/O. The non-DMAable buffer region contains information used to build
300  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
301  * and the initial BPL. In addition to allocating memory, the FCP CMND and
302  * FCP RSP BDEs are set up in the BPL and the BPL BDE is set up in the IOCB.
303  *
304  * Return codes:
305  *   int - number of scsi buffers that were allocated.
306  *   0 = failure, less than num_to_alloc is a partial failure.
307  **/
308 static int
309 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
310 {
311 	struct lpfc_hba *phba = vport->phba;
312 	struct lpfc_io_buf *psb;
313 	struct ulp_bde64 *bpl;
314 	IOCB_t *iocb;
315 	dma_addr_t pdma_phys_fcp_cmd;
316 	dma_addr_t pdma_phys_fcp_rsp;
317 	dma_addr_t pdma_phys_sgl;
318 	uint16_t iotag;
319 	int bcnt, bpl_size;
320 
321 	bpl_size = phba->cfg_sg_dma_buf_size -
322 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
323 
324 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
325 			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
326 			 num_to_alloc, phba->cfg_sg_dma_buf_size,
327 			 (int)sizeof(struct fcp_cmnd),
328 			 (int)sizeof(struct fcp_rsp), bpl_size);
329 
330 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
331 		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
332 		if (!psb)
333 			break;
334 
335 		/*
336 		 * Get memory from the pci pool to map the virt space to pci
337 		 * bus space for an I/O.  The DMA buffer includes space for the
338 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
339 		 * necessary to support the sg_tablesize.
340 		 */
341 		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
342 					GFP_KERNEL, &psb->dma_handle);
343 		if (!psb->data) {
344 			kfree(psb);
345 			break;
346 		}
347 
348 
349 		/* Allocate iotag for psb->cur_iocbq. */
350 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
351 		if (iotag == 0) {
352 			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
353 				      psb->data, psb->dma_handle);
354 			kfree(psb);
355 			break;
356 		}
357 		psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
358 
359 		psb->fcp_cmnd = psb->data;
360 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
361 		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
362 			sizeof(struct fcp_rsp);
363 
364 		/* Initialize local short-hand pointers. */
365 		bpl = (struct ulp_bde64 *)psb->dma_sgl;
366 		pdma_phys_fcp_cmd = psb->dma_handle;
367 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
368 		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
369 			sizeof(struct fcp_rsp);
370 
371 		/*
372 		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
373 		 * are sg list bdes.  Initialize the first two and leave the
374 		 * rest for queuecommand.
375 		 */
376 		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
377 		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
378 		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
379 		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
380 		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
381 
382 		/* Setup the physical region for the FCP RSP */
383 		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
384 		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
385 		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
386 		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
387 		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
388 
389 		/*
390 		 * Since the IOCB for the FCP I/O is built into this
391 		 * lpfc_scsi_buf, initialize it with all known data now.
392 		 */
393 		iocb = &psb->cur_iocbq.iocb;
394 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
395 		if ((phba->sli_rev == 3) &&
396 				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
397 			/* fill in immediate fcp command BDE */
398 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
399 			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
400 			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
401 					unsli3.fcp_ext.icd);
402 			iocb->un.fcpi64.bdl.addrHigh = 0;
403 			iocb->ulpBdeCount = 0;
404 			iocb->ulpLe = 0;
405 			/* fill in response BDE */
406 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
407 							BUFF_TYPE_BDE_64;
408 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
409 				sizeof(struct fcp_rsp);
410 			iocb->unsli3.fcp_ext.rbde.addrLow =
411 				putPaddrLow(pdma_phys_fcp_rsp);
412 			iocb->unsli3.fcp_ext.rbde.addrHigh =
413 				putPaddrHigh(pdma_phys_fcp_rsp);
414 		} else {
415 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
416 			iocb->un.fcpi64.bdl.bdeSize =
417 					(2 * sizeof(struct ulp_bde64));
418 			iocb->un.fcpi64.bdl.addrLow =
419 					putPaddrLow(pdma_phys_sgl);
420 			iocb->un.fcpi64.bdl.addrHigh =
421 					putPaddrHigh(pdma_phys_sgl);
422 			iocb->ulpBdeCount = 1;
423 			iocb->ulpLe = 1;
424 		}
425 		iocb->ulpClass = CLASS3;
426 		psb->status = IOSTAT_SUCCESS;
427 		/* Put it back into the SCSI buffer list */
428 		psb->cur_iocbq.io_buf = psb;
429 		spin_lock_init(&psb->buf_lock);
430 		lpfc_release_scsi_buf_s3(phba, psb);
431 
432 	}
433 
434 	return bcnt;
435 }
436 
437 /**
438  * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
439  * @vport: pointer to lpfc vport data structure.
440  *
441  * This routine is invoked by the vport cleanup for deletions and the cleanup
442  * for an ndlp on removal.
443  **/
444 void
445 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
446 {
447 	struct lpfc_hba *phba = vport->phba;
448 	struct lpfc_io_buf *psb, *next_psb;
449 	struct lpfc_sli4_hdw_queue *qp;
450 	unsigned long iflag = 0;
451 	int idx;
452 
453 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
454 		return;
455 
456 	spin_lock_irqsave(&phba->hbalock, iflag);
457 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
458 		qp = &phba->sli4_hba.hdwq[idx];
459 
460 		spin_lock(&qp->abts_io_buf_list_lock);
461 		list_for_each_entry_safe(psb, next_psb,
462 					 &qp->lpfc_abts_io_buf_list, list) {
463 			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
464 				continue;
465 
466 			if (psb->rdata && psb->rdata->pnode &&
467 			    psb->rdata->pnode->vport == vport)
468 				psb->rdata = NULL;
469 		}
470 		spin_unlock(&qp->abts_io_buf_list_lock);
471 	}
472 	spin_unlock_irqrestore(&phba->hbalock, iflag);
473 }
474 
475 /**
476  * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
477  * @phba: pointer to lpfc hba data structure.
478  * @axri: pointer to the fcp xri abort wcqe structure.
479  * @idx: index into hdwq
480  *
481  * This routine is invoked by the worker thread to process a SLI4 fast-path
482  * FCP or NVME aborted xri.
483  **/
484 void
485 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
486 			 struct sli4_wcqe_xri_aborted *axri, int idx)
487 {
488 	u16 xri = 0;
489 	u16 rxid = 0;
490 	struct lpfc_io_buf *psb, *next_psb;
491 	struct lpfc_sli4_hdw_queue *qp;
492 	unsigned long iflag = 0;
493 	struct lpfc_iocbq *iocbq;
494 	int i;
495 	struct lpfc_nodelist *ndlp;
496 	int rrq_empty = 0;
497 	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
498 	struct scsi_cmnd *cmd;
499 	int offline = 0;
500 
501 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
502 		return;
503 	offline = pci_channel_offline(phba->pcidev);
504 	if (!offline) {
505 		xri = bf_get(lpfc_wcqe_xa_xri, axri);
506 		rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
507 	}
508 	qp = &phba->sli4_hba.hdwq[idx];
509 	spin_lock_irqsave(&phba->hbalock, iflag);
510 	spin_lock(&qp->abts_io_buf_list_lock);
511 	list_for_each_entry_safe(psb, next_psb,
512 		&qp->lpfc_abts_io_buf_list, list) {
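		/*
		 * When the PCI channel is offline there is no valid XRI from
		 * the WCQE, so treat every buffer on the abort list as
		 * aborted.
		 */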
513 		if (offline)
514 			xri = psb->cur_iocbq.sli4_xritag;
515 		if (psb->cur_iocbq.sli4_xritag == xri) {
516 			list_del_init(&psb->list);
517 			psb->flags &= ~LPFC_SBUF_XBUSY;
518 			psb->status = IOSTAT_SUCCESS;
519 			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
520 				qp->abts_nvme_io_bufs--;
521 				spin_unlock(&qp->abts_io_buf_list_lock);
522 				spin_unlock_irqrestore(&phba->hbalock, iflag);
523 				if (!offline) {
524 					lpfc_sli4_nvme_xri_aborted(phba, axri,
525 								   psb);
526 					return;
527 				}
528 				lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
529 				spin_lock_irqsave(&phba->hbalock, iflag);
530 				spin_lock(&qp->abts_io_buf_list_lock);
531 				continue;
532 			}
533 			qp->abts_scsi_io_bufs--;
534 			spin_unlock(&qp->abts_io_buf_list_lock);
535 
536 			if (psb->rdata && psb->rdata->pnode)
537 				ndlp = psb->rdata->pnode;
538 			else
539 				ndlp = NULL;
540 
541 			rrq_empty = list_empty(&phba->active_rrq_list);
542 			spin_unlock_irqrestore(&phba->hbalock, iflag);
543 			if (ndlp && !offline) {
544 				lpfc_set_rrq_active(phba, ndlp,
545 					psb->cur_iocbq.sli4_lxritag, rxid, 1);
546 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
547 			}
548 
549 			if (phba->cfg_fcp_wait_abts_rsp || offline) {
550 				spin_lock_irqsave(&psb->buf_lock, iflag);
551 				cmd = psb->pCmd;
552 				psb->pCmd = NULL;
553 				spin_unlock_irqrestore(&psb->buf_lock, iflag);
554 
555 				/* The sdev is not guaranteed to be valid post
556 				 * scsi_done upcall.
557 				 */
558 				if (cmd)
559 					scsi_done(cmd);
560 
561 				/*
562 				 * We expect there is an abort thread waiting
563 				 * for command completion; wake up the thread.
564 				 */
565 				spin_lock_irqsave(&psb->buf_lock, iflag);
566 				psb->cur_iocbq.cmd_flag &=
567 					~LPFC_DRIVER_ABORTED;
568 				if (psb->waitq)
569 					wake_up(psb->waitq);
570 				spin_unlock_irqrestore(&psb->buf_lock, iflag);
571 			}
572 
573 			lpfc_release_scsi_buf_s4(phba, psb);
574 			if (rrq_empty)
575 				lpfc_worker_wake_up(phba);
576 			if (!offline)
577 				return;
578 			spin_lock_irqsave(&phba->hbalock, iflag);
579 			spin_lock(&qp->abts_io_buf_list_lock);
580 			continue;
581 		}
582 	}
583 	spin_unlock(&qp->abts_io_buf_list_lock);
584 	if (!offline) {
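		/*
		 * No buffer on the abort list matched; scan the active iotag
		 * table for an outstanding FCP I/O using this XRI and clear
		 * its exchange-busy flag.
		 */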
585 		for (i = 1; i <= phba->sli.last_iotag; i++) {
586 			iocbq = phba->sli.iocbq_lookup[i];
587 
588 			if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
589 			    (iocbq->cmd_flag & LPFC_IO_LIBDFC))
590 				continue;
591 			if (iocbq->sli4_xritag != xri)
592 				continue;
593 			psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
594 			psb->flags &= ~LPFC_SBUF_XBUSY;
595 			spin_unlock_irqrestore(&phba->hbalock, iflag);
596 			if (!list_empty(&pring->txq))
597 				lpfc_worker_wake_up(phba);
598 			return;
599 		}
600 	}
601 	spin_unlock_irqrestore(&phba->hbalock, iflag);
602 }
603 
604 /**
605  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
606  * @phba: The HBA for which this call is being executed.
607  * @ndlp: pointer to a node-list data structure.
608  * @cmnd: Pointer to scsi_cmnd data structure.
609  *
610  * This routine removes a scsi buffer from the head of the @phba
611  * lpfc_scsi_buf_list and returns it to the caller.
612  *
613  * Return codes:
614  *   NULL - Error
615  *   Pointer to lpfc_scsi_buf - Success
616  **/
617 static struct lpfc_io_buf *
618 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
619 		     struct scsi_cmnd *cmnd)
620 {
621 	struct lpfc_io_buf *lpfc_cmd = NULL;
622 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
623 	unsigned long iflag = 0;
624 
625 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
626 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
627 			 list);
628 	if (!lpfc_cmd) {
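		/*
		 * The get list is empty; splice the put list over and retry
		 * the allocation.
		 */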
629 		spin_lock(&phba->scsi_buf_list_put_lock);
630 		list_splice(&phba->lpfc_scsi_buf_list_put,
631 			    &phba->lpfc_scsi_buf_list_get);
632 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
633 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
634 				 struct lpfc_io_buf, list);
635 		spin_unlock(&phba->scsi_buf_list_put_lock);
636 	}
637 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
638 
639 	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
640 		atomic_inc(&ndlp->cmd_pending);
641 		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
642 	}
643 	return  lpfc_cmd;
644 }
645 /**
646  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
647  * @phba: The HBA for which this call is being executed.
648  * @ndlp: pointer to a node-list data structure.
649  * @cmnd: Pointer to scsi_cmnd data structure.
650  *
651  * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
652  * and returns it to the caller.
653  *
654  * Return codes:
655  *   NULL - Error
656  *   Pointer to lpfc_scsi_buf - Success
657  **/
658 static struct lpfc_io_buf *
659 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
660 		     struct scsi_cmnd *cmnd)
661 {
662 	struct lpfc_io_buf *lpfc_cmd;
663 	struct lpfc_sli4_hdw_queue *qp;
664 	struct sli4_sge *sgl;
665 	dma_addr_t pdma_phys_fcp_rsp;
666 	dma_addr_t pdma_phys_fcp_cmd;
667 	uint32_t cpu, idx;
668 	int tag;
669 	struct fcp_cmd_rsp_buf *tmp = NULL;
670 
671 	cpu = raw_smp_processor_id();
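	/*
	 * Select the hardware queue either from the request's blk-mq tag
	 * (when scheduling by hdwq) or from the per-CPU mapping.
	 */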
672 	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
673 		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
674 		idx = blk_mq_unique_tag_to_hwq(tag);
675 	} else {
676 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
677 	}
678 
679 	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
680 				   !phba->cfg_xri_rebalancing);
681 	if (!lpfc_cmd) {
682 		qp = &phba->sli4_hba.hdwq[idx];
683 		qp->empty_io_bufs++;
684 		return NULL;
685 	}
686 
687 	/* Setup key fields in buffer that may have been changed
688 	 * if other protocols used this buffer.
689 	 */
690 	lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
691 	lpfc_cmd->prot_seg_cnt = 0;
692 	lpfc_cmd->seg_cnt = 0;
693 	lpfc_cmd->timeout = 0;
694 	lpfc_cmd->flags = 0;
695 	lpfc_cmd->start_time = jiffies;
696 	lpfc_cmd->waitq = NULL;
697 	lpfc_cmd->cpu = cpu;
698 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
699 	lpfc_cmd->prot_data_type = 0;
700 #endif
701 	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
702 	if (!tmp) {
703 		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
704 		return NULL;
705 	}
706 
707 	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
708 	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
709 
710 	/*
711 	 * The first two SGEs are the FCP_CMD and FCP_RSP.
712 	 * The balance are sg list bdes. Initialize the
713 	 * first two and leave the rest for queuecommand.
714 	 */
715 	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
716 	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
717 	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
718 	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
719 	sgl->word2 = le32_to_cpu(sgl->word2);
720 	bf_set(lpfc_sli4_sge_last, sgl, 0);
721 	sgl->word2 = cpu_to_le32(sgl->word2);
722 	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
723 	sgl++;
724 
725 	/* Setup the physical region for the FCP RSP */
726 	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
727 	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
728 	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
729 	sgl->word2 = le32_to_cpu(sgl->word2);
730 	bf_set(lpfc_sli4_sge_last, sgl, 1);
731 	sgl->word2 = cpu_to_le32(sgl->word2);
732 	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
733 
734 	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
735 		atomic_inc(&ndlp->cmd_pending);
736 		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
737 	}
738 	return  lpfc_cmd;
739 }
740 /**
741  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
742  * @phba: The HBA for which this call is being executed.
743  * @ndlp: pointer to a node-list data structure.
744  * @cmnd: Pointer to scsi_cmnd data structure.
745  *
746  * This routine removes a scsi buffer from the head of the @phba
747  * lpfc_scsi_buf_list and returns it to the caller.
748  *
749  * Return codes:
750  *   NULL - Error
751  *   Pointer to lpfc_scsi_buf - Success
752  **/
753 static struct lpfc_io_buf*
754 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
755 		  struct scsi_cmnd *cmnd)
756 {
757 	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
758 }
759 
760 /**
761  * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
762  * @phba: The Hba for which this call is being executed.
763  * @psb: The scsi buffer which is being released.
764  *
765  * This routine releases the @psb scsi buffer by adding it to the tail of the
766  * @phba lpfc_scsi_buf_list.
767  **/
768 static void
769 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
770 {
771 	unsigned long iflag = 0;
772 
773 	psb->seg_cnt = 0;
774 	psb->prot_seg_cnt = 0;
775 
776 	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
777 	psb->pCmd = NULL;
778 	psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
779 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
780 	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
781 }
782 
783 /**
784  * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
785  * @phba: The Hba for which this call is being executed.
786  * @psb: The scsi buffer which is being released.
787  *
788  * This routine releases the @psb scsi buffer by adding it to the tail of the
789  * @hdwq io_buf_list. For SLI4, XRIs are tied to the scsi buffer and, if the
790  * I/O was aborted, cannot be reused for at least RA_TOV.
792  **/
793 static void
794 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
795 {
796 	struct lpfc_sli4_hdw_queue *qp;
797 	unsigned long iflag = 0;
798 
799 	psb->seg_cnt = 0;
800 	psb->prot_seg_cnt = 0;
801 
802 	qp = psb->hdwq;
803 	if (psb->flags & LPFC_SBUF_XBUSY) {
804 		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
805 		if (!phba->cfg_fcp_wait_abts_rsp)
806 			psb->pCmd = NULL;
807 		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
808 		qp->abts_scsi_io_bufs++;
809 		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
810 	} else {
811 		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
812 	}
813 }
814 
815 /**
816  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
817  * @phba: The Hba for which this call is being executed.
818  * @psb: The scsi buffer which is being released.
819  *
820  * This routine releases the @psb scsi buffer by adding it to the tail of the
821  * @phba lpfc_scsi_buf_list.
822  **/
823 static void
824 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
825 {
826 	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
827 		atomic_dec(&psb->ndlp->cmd_pending);
828 
829 	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
830 	phba->lpfc_release_scsi_buf(phba, psb);
831 }
832 
833 /**
834  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
835  * @data: A pointer to the immediate command data portion of the IOCB.
836  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
837  *
838  * The routine copies the entire FCP command from @fcp_cmnd to @data while
839  * byte swapping the data to big endian format for transmission on the wire.
840  **/
841 static void
842 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
843 {
844 	int i, j;
845 
846 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
847 	     i += sizeof(uint32_t), j++) {
848 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
849 	}
850 }
851 
852 /**
853  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
854  * @phba: The Hba for which this call is being executed.
855  * @lpfc_cmd: The scsi buffer which is going to be mapped.
856  *
857  * This routine does the PCI DMA mapping for the scatter-gather list of the
858  * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec. It
859  * scans through the sg elements and formats the bdes. It also initializes all
860  * IOCB fields that depend on the scsi command request buffer.
861  *
862  * Return codes:
863  *   1 - Error
864  *   0 - Success
865  **/
866 static int
867 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
868 {
869 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
870 	struct scatterlist *sgel = NULL;
871 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
872 	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
873 	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
874 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
875 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
876 	dma_addr_t physaddr;
877 	uint32_t num_bde = 0;
878 	int nseg, datadir = scsi_cmnd->sc_data_direction;
879 
880 	/*
881 	 * There are three possibilities here - use scatter-gather segment, use
882 	 * the single mapping, or neither.  Start the lpfc command prep by
883 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
884 	 * data bde entry.
885 	 */
886 	bpl += 2;
887 	if (scsi_sg_count(scsi_cmnd)) {
888 		/*
889 		 * The driver stores the segment count returned from dma_map_sg
890 		 * because this a count of dma-mappings used to map the use_sg
891 		 * because this is a count of dma-mappings used to map the use_sg
892 		 * architectures that implement an IOMMU.
893 		 */
894 
895 		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
896 				  scsi_sg_count(scsi_cmnd), datadir);
897 		if (unlikely(!nseg))
898 			return 1;
899 
900 		lpfc_cmd->seg_cnt = nseg;
901 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
902 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
903 					"9064 BLKGRD: %s: Too many sg segments"
904 					" from dma_map_sg.  Config %d, seg_cnt"
905 					" %d\n", __func__, phba->cfg_sg_seg_cnt,
906 					lpfc_cmd->seg_cnt);
907 			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
908 			lpfc_cmd->seg_cnt = 0;
909 			scsi_dma_unmap(scsi_cmnd);
910 			return 2;
911 		}
912 
913 		/*
914 		 * The driver established a maximum scatter-gather segment count
915 		 * during probe that limits the number of sg elements in any
916 		 * single scsi command.  Just run through the seg_cnt and format
917 		 * the bde's.
918 		 * When using SLI-3 the driver will try to fit all the BDEs into
919 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
920 		 * does for SLI-2 mode.
921 		 */
922 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
923 			physaddr = sg_dma_address(sgel);
924 			if (phba->sli_rev == 3 &&
925 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
926 			    !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
927 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
928 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
929 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
930 				data_bde->addrLow = putPaddrLow(physaddr);
931 				data_bde->addrHigh = putPaddrHigh(physaddr);
932 				data_bde++;
933 			} else {
934 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
935 				bpl->tus.f.bdeSize = sg_dma_len(sgel);
936 				bpl->tus.w = le32_to_cpu(bpl->tus.w);
937 				bpl->addrLow =
938 					le32_to_cpu(putPaddrLow(physaddr));
939 				bpl->addrHigh =
940 					le32_to_cpu(putPaddrHigh(physaddr));
941 				bpl++;
942 			}
943 		}
944 	}
945 
946 	/*
947 	 * Finish initializing those IOCB fields that are dependent on the
948 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
949 	 * explicitly reinitialized and for SLI-3 the extended bde count is
950 	 * explicitly reinitialized since all iocb memory resources are reused.
951 	 */
952 	if (phba->sli_rev == 3 &&
953 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
954 	    !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
955 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
956 			/*
957 			 * The extended IOCB format can only fit 3 BDE or a BPL.
958 			 * This I/O has more than 3 BDE so the 1st data bde will
959 			 * be a BPL that is filled in here.
960 			 */
961 			physaddr = lpfc_cmd->dma_handle;
962 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
963 			data_bde->tus.f.bdeSize = (num_bde *
964 						   sizeof(struct ulp_bde64));
965 			physaddr += (sizeof(struct fcp_cmnd) +
966 				     sizeof(struct fcp_rsp) +
967 				     (2 * sizeof(struct ulp_bde64)));
968 			data_bde->addrHigh = putPaddrHigh(physaddr);
969 			data_bde->addrLow = putPaddrLow(physaddr);
970 			/* ebde count includes the response bde and data bpl */
971 			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
972 		} else {
973 			/* ebde count includes the response bde and data bdes */
974 			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
975 		}
976 	} else {
977 		iocb_cmd->un.fcpi64.bdl.bdeSize =
978 			((num_bde + 2) * sizeof(struct ulp_bde64));
979 		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
980 	}
981 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
982 
983 	/*
984 	 * Due to difference in data length between DIF/non-DIF paths,
985 	 * we need to set word 4 of IOCB here
986 	 */
987 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
988 	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
989 	return 0;
990 }
991 
992 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
993 
994 /* Return BG_ERR_INIT if error injection is detected by Initiator */
995 #define BG_ERR_INIT	0x1
996 /* Return BG_ERR_TGT if error injection is detected by Target */
997 #define BG_ERR_TGT	0x2
998 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
999 #define BG_ERR_SWAP	0x10
1000 /*
1001  * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
1002  * error injection
1003  */
1004 #define BG_ERR_CHECK	0x20
1005 
1006 /**
1007  * lpfc_bg_err_inject - Determine if we should inject an error
1008  * @phba: The Hba for which this call is being executed.
1009  * @sc: The SCSI command to examine
1010  * @reftag: (out) BlockGuard reference tag for transmitted data
1011  * @apptag: (out) BlockGuard application tag for transmitted data
1012  * @new_guard: (in) Value to replace CRC with if needed
1013  *
1014  * Returns BG_ERR_* bit mask or 0 if request ignored
1015  **/
1016 static int
1017 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1018 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1019 {
1020 	struct scatterlist *sgpe; /* s/g prot entry */
1021 	struct lpfc_io_buf *lpfc_cmd = NULL;
1022 	struct scsi_dif_tuple *src = NULL;
1023 	struct lpfc_nodelist *ndlp;
1024 	struct lpfc_rport_data *rdata;
1025 	uint32_t op = scsi_get_prot_op(sc);
1026 	uint32_t blksize;
1027 	uint32_t numblks;
1028 	u32 lba;
1029 	int rc = 0;
1030 	int blockoff = 0;
1031 
1032 	if (op == SCSI_PROT_NORMAL)
1033 		return 0;
1034 
1035 	sgpe = scsi_prot_sglist(sc);
1036 	lba = scsi_prot_ref_tag(sc);
1037 	if (lba == LPFC_INVALID_REFTAG)
1038 		return 0;
1039 
1040 	/* First check if we need to match the LBA */
1041 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1042 		blksize = scsi_prot_interval(sc);
1043 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1044 
1045 		/* Make sure we have the right LBA if one is specified */
1046 		if (phba->lpfc_injerr_lba < (u64)lba ||
1047 		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
1048 			return 0;
1049 		if (sgpe) {
1050 			blockoff = phba->lpfc_injerr_lba - (u64)lba;
1051 			numblks = sg_dma_len(sgpe) /
1052 				sizeof(struct scsi_dif_tuple);
1053 			if (numblks < blockoff)
1054 				blockoff = numblks;
1055 		}
1056 	}
1057 
1058 	/* Next check if we need to match the remote NPortID or WWPN */
1059 	rdata = lpfc_rport_data_from_scsi_device(sc->device);
1060 	if (rdata && rdata->pnode) {
1061 		ndlp = rdata->pnode;
1062 
1063 		/* Make sure we have the right NPortID if one is specified */
1064 		if (phba->lpfc_injerr_nportid  &&
1065 			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1066 			return 0;
1067 
1068 		/*
1069 		 * Make sure we have the right WWPN if one is specified.
1070 		 * wwn[0] should be a non-zero NAA in a good WWPN.
1071 		 */
1072 		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1073 			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1074 				sizeof(struct lpfc_name)) != 0))
1075 			return 0;
1076 	}
1077 
1078 	/* Setup a ptr to the protection data if the SCSI host provides it */
1079 	if (sgpe) {
1080 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1081 		src += blockoff;
1082 		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1083 	}
1084 
1085 	/* Should we change the Reference Tag */
1086 	if (reftag) {
1087 		if (phba->lpfc_injerr_wref_cnt) {
1088 			switch (op) {
1089 			case SCSI_PROT_WRITE_PASS:
1090 				if (src) {
1091 					/*
1092 					 * For WRITE_PASS, force the error
1093 					 * to be sent on the wire. It should
1094 					 * be detected by the Target.
1095 					 * If blockoff != 0 error will be
1096 					 * inserted in middle of the IO.
1097 					 */
1098 
1099 					lpfc_printf_log(phba, KERN_ERR,
1100 							LOG_TRACE_EVENT,
1101 					"9076 BLKGRD: Injecting reftag error: "
1102 					"write lba x%lx + x%x oldrefTag x%x\n",
1103 					(unsigned long)lba, blockoff,
1104 					be32_to_cpu(src->ref_tag));
1105 
1106 					/*
1107 					 * Save the old ref_tag so we can
1108 					 * restore it on completion.
1109 					 */
1110 					if (lpfc_cmd) {
1111 						lpfc_cmd->prot_data_type =
1112 							LPFC_INJERR_REFTAG;
1113 						lpfc_cmd->prot_data_segment =
1114 							src;
1115 						lpfc_cmd->prot_data =
1116 							src->ref_tag;
1117 					}
1118 					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1119 					phba->lpfc_injerr_wref_cnt--;
1120 					if (phba->lpfc_injerr_wref_cnt == 0) {
1121 						phba->lpfc_injerr_nportid = 0;
1122 						phba->lpfc_injerr_lba =
1123 							LPFC_INJERR_LBA_OFF;
1124 						memset(&phba->lpfc_injerr_wwpn,
1125 						  0, sizeof(struct lpfc_name));
1126 					}
1127 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1128 
1129 					break;
1130 				}
1131 				fallthrough;
1132 			case SCSI_PROT_WRITE_INSERT:
1133 				/*
1134 				 * For WRITE_INSERT, force the error
1135 				 * to be sent on the wire. It should be
1136 				 * detected by the Target.
1137 				 */
1138 				/* DEADBEEF will be the reftag on the wire */
1139 				*reftag = 0xDEADBEEF;
1140 				phba->lpfc_injerr_wref_cnt--;
1141 				if (phba->lpfc_injerr_wref_cnt == 0) {
1142 					phba->lpfc_injerr_nportid = 0;
1143 					phba->lpfc_injerr_lba =
1144 					LPFC_INJERR_LBA_OFF;
1145 					memset(&phba->lpfc_injerr_wwpn,
1146 						0, sizeof(struct lpfc_name));
1147 				}
1148 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1149 
1150 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1151 					"9078 BLKGRD: Injecting reftag error: "
1152 					"write lba x%lx\n", (unsigned long)lba);
1153 				break;
1154 			case SCSI_PROT_WRITE_STRIP:
1155 				/*
1156 				 * For WRITE_STRIP and WRITE_PASS,
1157 				 * force the error on data
1158 				 * being copied from SLI-Host to SLI-Port.
1159 				 */
1160 				*reftag = 0xDEADBEEF;
1161 				phba->lpfc_injerr_wref_cnt--;
1162 				if (phba->lpfc_injerr_wref_cnt == 0) {
1163 					phba->lpfc_injerr_nportid = 0;
1164 					phba->lpfc_injerr_lba =
1165 						LPFC_INJERR_LBA_OFF;
1166 					memset(&phba->lpfc_injerr_wwpn,
1167 						0, sizeof(struct lpfc_name));
1168 				}
1169 				rc = BG_ERR_INIT;
1170 
1171 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1172 					"9077 BLKGRD: Injecting reftag error: "
1173 					"write lba x%lx\n", (unsigned long)lba);
1174 				break;
1175 			}
1176 		}
1177 		if (phba->lpfc_injerr_rref_cnt) {
1178 			switch (op) {
1179 			case SCSI_PROT_READ_INSERT:
1180 			case SCSI_PROT_READ_STRIP:
1181 			case SCSI_PROT_READ_PASS:
1182 				/*
1183 				 * For READ_STRIP and READ_PASS, force the
1184 				 * error on data being read off the wire. It
1185 				 * should force an IO error to the driver.
1186 				 */
1187 				*reftag = 0xDEADBEEF;
1188 				phba->lpfc_injerr_rref_cnt--;
1189 				if (phba->lpfc_injerr_rref_cnt == 0) {
1190 					phba->lpfc_injerr_nportid = 0;
1191 					phba->lpfc_injerr_lba =
1192 						LPFC_INJERR_LBA_OFF;
1193 					memset(&phba->lpfc_injerr_wwpn,
1194 						0, sizeof(struct lpfc_name));
1195 				}
1196 				rc = BG_ERR_INIT;
1197 
1198 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1199 					"9079 BLKGRD: Injecting reftag error: "
1200 					"read lba x%lx\n", (unsigned long)lba);
1201 				break;
1202 			}
1203 		}
1204 	}
1205 
1206 	/* Should we change the Application Tag */
1207 	if (apptag) {
1208 		if (phba->lpfc_injerr_wapp_cnt) {
1209 			switch (op) {
1210 			case SCSI_PROT_WRITE_PASS:
1211 				if (src) {
1212 					/*
1213 					 * For WRITE_PASS, force the error
1214 					 * to be sent on the wire. It should
1215 					 * be detected by the Target.
1216 					 * If blockoff != 0 error will be
1217 					 * inserted in middle of the IO.
1218 					 */
1219 
1220 					lpfc_printf_log(phba, KERN_ERR,
1221 							LOG_TRACE_EVENT,
1222 					"9080 BLKGRD: Injecting apptag error: "
1223 					"write lba x%lx + x%x oldappTag x%x\n",
1224 					(unsigned long)lba, blockoff,
1225 					be16_to_cpu(src->app_tag));
1226 
1227 					/*
1228 					 * Save the old app_tag so we can
1229 					 * restore it on completion.
1230 					 */
1231 					if (lpfc_cmd) {
1232 						lpfc_cmd->prot_data_type =
1233 							LPFC_INJERR_APPTAG;
1234 						lpfc_cmd->prot_data_segment =
1235 							src;
1236 						lpfc_cmd->prot_data =
1237 							src->app_tag;
1238 					}
1239 					src->app_tag = cpu_to_be16(0xDEAD);
1240 					phba->lpfc_injerr_wapp_cnt--;
1241 					if (phba->lpfc_injerr_wapp_cnt == 0) {
1242 						phba->lpfc_injerr_nportid = 0;
1243 						phba->lpfc_injerr_lba =
1244 							LPFC_INJERR_LBA_OFF;
1245 						memset(&phba->lpfc_injerr_wwpn,
1246 						  0, sizeof(struct lpfc_name));
1247 					}
1248 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1249 					break;
1250 				}
1251 				fallthrough;
1252 			case SCSI_PROT_WRITE_INSERT:
1253 				/*
1254 				 * For WRITE_INSERT, force the
1255 				 * error to be sent on the wire. It should be
1256 				 * detected by the Target.
1257 				 */
1258 				/* DEAD will be the apptag on the wire */
1259 				*apptag = 0xDEAD;
1260 				phba->lpfc_injerr_wapp_cnt--;
1261 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1262 					phba->lpfc_injerr_nportid = 0;
1263 					phba->lpfc_injerr_lba =
1264 						LPFC_INJERR_LBA_OFF;
1265 					memset(&phba->lpfc_injerr_wwpn,
1266 						0, sizeof(struct lpfc_name));
1267 				}
1268 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1269 
1270 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1271 					"0813 BLKGRD: Injecting apptag error: "
1272 					"write lba x%lx\n", (unsigned long)lba);
1273 				break;
1274 			case SCSI_PROT_WRITE_STRIP:
1275 				/*
1276 				 * For WRITE_STRIP and WRITE_PASS,
1277 				 * force the error on data
1278 				 * being copied from SLI-Host to SLI-Port.
1279 				 */
1280 				*apptag = 0xDEAD;
1281 				phba->lpfc_injerr_wapp_cnt--;
1282 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1283 					phba->lpfc_injerr_nportid = 0;
1284 					phba->lpfc_injerr_lba =
1285 						LPFC_INJERR_LBA_OFF;
1286 					memset(&phba->lpfc_injerr_wwpn,
1287 						0, sizeof(struct lpfc_name));
1288 				}
1289 				rc = BG_ERR_INIT;
1290 
1291 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1292 					"0812 BLKGRD: Injecting apptag error: "
1293 					"write lba x%lx\n", (unsigned long)lba);
1294 				break;
1295 			}
1296 		}
1297 		if (phba->lpfc_injerr_rapp_cnt) {
1298 			switch (op) {
1299 			case SCSI_PROT_READ_INSERT:
1300 			case SCSI_PROT_READ_STRIP:
1301 			case SCSI_PROT_READ_PASS:
1302 				/*
1303 				 * For READ_STRIP and READ_PASS, force the
1304 				 * error on data being read off the wire. It
1305 				 * should force an IO error to the driver.
1306 				 */
1307 				*apptag = 0xDEAD;
1308 				phba->lpfc_injerr_rapp_cnt--;
1309 				if (phba->lpfc_injerr_rapp_cnt == 0) {
1310 					phba->lpfc_injerr_nportid = 0;
1311 					phba->lpfc_injerr_lba =
1312 						LPFC_INJERR_LBA_OFF;
1313 					memset(&phba->lpfc_injerr_wwpn,
1314 						0, sizeof(struct lpfc_name));
1315 				}
1316 				rc = BG_ERR_INIT;
1317 
1318 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1319 					"0814 BLKGRD: Injecting apptag error: "
1320 					"read lba x%lx\n", (unsigned long)lba);
1321 				break;
1322 			}
1323 		}
1324 	}
1325 
1326 
1327 	/* Should we change the Guard Tag */
1328 	if (new_guard) {
1329 		if (phba->lpfc_injerr_wgrd_cnt) {
1330 			switch (op) {
1331 			case SCSI_PROT_WRITE_PASS:
1332 				rc = BG_ERR_CHECK;
1333 				fallthrough;
1334 
1335 			case SCSI_PROT_WRITE_INSERT:
1336 				/*
1337 				 * For WRITE_INSERT, force the
1338 				 * error to be sent on the wire. It should be
1339 				 * detected by the Target.
1340 				 */
1341 				phba->lpfc_injerr_wgrd_cnt--;
1342 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1343 					phba->lpfc_injerr_nportid = 0;
1344 					phba->lpfc_injerr_lba =
1345 						LPFC_INJERR_LBA_OFF;
1346 					memset(&phba->lpfc_injerr_wwpn,
1347 						0, sizeof(struct lpfc_name));
1348 				}
1349 
1350 				rc |= BG_ERR_TGT | BG_ERR_SWAP;
1351 				/* Signals the caller to swap CRC->CSUM */
1352 
1353 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1354 					"0817 BLKGRD: Injecting guard error: "
1355 					"write lba x%lx\n", (unsigned long)lba);
1356 				break;
1357 			case SCSI_PROT_WRITE_STRIP:
1358 				/*
1359 				 * For WRITE_STRIP and WRITE_PASS,
1360 				 * force the error on data
1361 				 * being copied from SLI-Host to SLI-Port.
1362 				 */
1363 				phba->lpfc_injerr_wgrd_cnt--;
1364 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1365 					phba->lpfc_injerr_nportid = 0;
1366 					phba->lpfc_injerr_lba =
1367 						LPFC_INJERR_LBA_OFF;
1368 					memset(&phba->lpfc_injerr_wwpn,
1369 						0, sizeof(struct lpfc_name));
1370 				}
1371 
1372 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1373 				/* Signals the caller to swap CRC->CSUM */
1374 
1375 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1376 					"0816 BLKGRD: Injecting guard error: "
1377 					"write lba x%lx\n", (unsigned long)lba);
1378 				break;
1379 			}
1380 		}
1381 		if (phba->lpfc_injerr_rgrd_cnt) {
1382 			switch (op) {
1383 			case SCSI_PROT_READ_INSERT:
1384 			case SCSI_PROT_READ_STRIP:
1385 			case SCSI_PROT_READ_PASS:
1386 				/*
1387 				 * For READ_STRIP and READ_PASS, force the
1388 				 * error on data being read off the wire. It
1389 				 * should force an IO error to the driver.
1390 				 */
1391 				phba->lpfc_injerr_rgrd_cnt--;
1392 				if (phba->lpfc_injerr_rgrd_cnt == 0) {
1393 					phba->lpfc_injerr_nportid = 0;
1394 					phba->lpfc_injerr_lba =
1395 						LPFC_INJERR_LBA_OFF;
1396 					memset(&phba->lpfc_injerr_wwpn,
1397 						0, sizeof(struct lpfc_name));
1398 				}
1399 
1400 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1401 				/* Signals the caller to swap CRC->CSUM */
1402 
1403 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1404 					"0818 BLKGRD: Injecting guard error: "
1405 					"read lba x%lx\n", (unsigned long)lba);
1406 			}
1407 		}
1408 	}
1409 
1410 	return rc;
1411 }
1412 #endif
1413 
1414 /**
1415  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1416  * the specified SCSI command.
1417  * @phba: The Hba for which this call is being executed.
1418  * @sc: The SCSI command to examine
1419  * @txop: (out) BlockGuard operation for transmitted data
1420  * @rxop: (out) BlockGuard operation for received data
1421  *
1422  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1423  *
1424  **/
1425 static int
1426 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1427 		uint8_t *txop, uint8_t *rxop)
1428 {
1429 	uint8_t ret = 0;
1430 
1431 	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1432 		switch (scsi_get_prot_op(sc)) {
1433 		case SCSI_PROT_READ_INSERT:
1434 		case SCSI_PROT_WRITE_STRIP:
1435 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1436 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1437 			break;
1438 
1439 		case SCSI_PROT_READ_STRIP:
1440 		case SCSI_PROT_WRITE_INSERT:
1441 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1442 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1443 			break;
1444 
1445 		case SCSI_PROT_READ_PASS:
1446 		case SCSI_PROT_WRITE_PASS:
1447 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
1448 			*txop = BG_OP_IN_CSUM_OUT_CRC;
1449 			break;
1450 
1451 		case SCSI_PROT_NORMAL:
1452 		default:
1453 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1454 				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1455 					scsi_get_prot_op(sc));
1456 			ret = 1;
1457 			break;
1458 
1459 		}
1460 	} else {
1461 		switch (scsi_get_prot_op(sc)) {
1462 		case SCSI_PROT_READ_STRIP:
1463 		case SCSI_PROT_WRITE_INSERT:
1464 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1465 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1466 			break;
1467 
1468 		case SCSI_PROT_READ_PASS:
1469 		case SCSI_PROT_WRITE_PASS:
1470 			*rxop = BG_OP_IN_CRC_OUT_CRC;
1471 			*txop = BG_OP_IN_CRC_OUT_CRC;
1472 			break;
1473 
1474 		case SCSI_PROT_READ_INSERT:
1475 		case SCSI_PROT_WRITE_STRIP:
1476 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1477 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1478 			break;
1479 
1480 		case SCSI_PROT_NORMAL:
1481 		default:
1482 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1483 				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1484 					scsi_get_prot_op(sc));
1485 			ret = 1;
1486 			break;
1487 		}
1488 	}
1489 
1490 	return ret;
1491 }
1492 
1493 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1494 /**
1495  * lpfc_bg_err_opcodes - Redetermine the BlockGuard opcodes to be used with
1496  * the specified SCSI command in order to force a guard tag error.
1497  * @phba: The Hba for which this call is being executed.
1498  * @sc: The SCSI command to examine
1499  * @txop: (out) BlockGuard operation for transmitted data
1500  * @rxop: (out) BlockGuard operation for received data
1501  *
1502  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1503  *
1504  **/
1505 static int
1506 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1507 		uint8_t *txop, uint8_t *rxop)
1508 {
1509 
1510 	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1511 		switch (scsi_get_prot_op(sc)) {
1512 		case SCSI_PROT_READ_INSERT:
1513 		case SCSI_PROT_WRITE_STRIP:
1514 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1515 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1516 			break;
1517 
1518 		case SCSI_PROT_READ_STRIP:
1519 		case SCSI_PROT_WRITE_INSERT:
1520 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1521 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1522 			break;
1523 
1524 		case SCSI_PROT_READ_PASS:
1525 		case SCSI_PROT_WRITE_PASS:
1526 			*rxop = BG_OP_IN_CSUM_OUT_CRC;
1527 			*txop = BG_OP_IN_CRC_OUT_CSUM;
1528 			break;
1529 
1530 		case SCSI_PROT_NORMAL:
1531 		default:
1532 			break;
1533 
1534 		}
1535 	} else {
1536 		switch (scsi_get_prot_op(sc)) {
1537 		case SCSI_PROT_READ_STRIP:
1538 		case SCSI_PROT_WRITE_INSERT:
1539 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1540 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1541 			break;
1542 
1543 		case SCSI_PROT_READ_PASS:
1544 		case SCSI_PROT_WRITE_PASS:
1545 			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
1546 			*txop = BG_OP_IN_CSUM_OUT_CSUM;
1547 			break;
1548 
1549 		case SCSI_PROT_READ_INSERT:
1550 		case SCSI_PROT_WRITE_STRIP:
1551 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1552 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1553 			break;
1554 
1555 		case SCSI_PROT_NORMAL:
1556 		default:
1557 			break;
1558 		}
1559 	}
1560 
1561 	return 0;
1562 }
1563 #endif
1564 
1565 /**
1566  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1567  * @phba: The Hba for which this call is being executed.
1568  * @sc: pointer to scsi command we're working on
1569  * @bpl: pointer to buffer list for protection groups
1570  * @datasegcnt: number of segments of data that have been dma mapped
1571  *
1572  * This function sets up BPL buffer list for protection groups of
1573  * type LPFC_PG_TYPE_NO_DIF
1574  *
1575  * This is usually used when the HBA is instructed to generate
1576  * DIFs and insert them into the data stream (or strip DIFs from
1577  * the incoming data stream).
1578  *
1579  * The buffer list consists of just one protection group described
1580  * below:
1581  *                                +-------------------------+
1582  *   start of prot group  -->     |          PDE_5          |
1583  *                                +-------------------------+
1584  *                                |          PDE_6          |
1585  *                                +-------------------------+
1586  *                                |         Data BDE        |
1587  *                                +-------------------------+
1588  *                                |more Data BDE's ... (opt)|
1589  *                                +-------------------------+
1590  *
1591  *
1592  * Note: Data s/g buffers have been dma mapped
1593  *
1594  * Returns the number of BDEs added to the BPL.
1595  **/
1596 static int
1597 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1598 		struct ulp_bde64 *bpl, int datasegcnt)
1599 {
1600 	struct scatterlist *sgde = NULL; /* s/g data entry */
1601 	struct lpfc_pde5 *pde5 = NULL;
1602 	struct lpfc_pde6 *pde6 = NULL;
1603 	dma_addr_t physaddr;
1604 	int i = 0, num_bde = 0, status;
1605 	int datadir = sc->sc_data_direction;
1606 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1607 	uint32_t rc;
1608 #endif
1609 	uint32_t checking = 1;
1610 	uint32_t reftag;
1611 	uint8_t txop, rxop;
1612 
1613 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1614 	if (status)
1615 		goto out;
1616 
1617 	/* extract some info from the scsi command for pde */
1618 	reftag = scsi_prot_ref_tag(sc);
1619 	if (reftag == LPFC_INVALID_REFTAG)
1620 		goto out;
1621 
1622 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1623 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1624 	if (rc) {
1625 		if (rc & BG_ERR_SWAP)
1626 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1627 		if (rc & BG_ERR_CHECK)
1628 			checking = 0;
1629 	}
1630 #endif
1631 
1632 	/* setup PDE5 with what we have */
1633 	pde5 = (struct lpfc_pde5 *) bpl;
1634 	memset(pde5, 0, sizeof(struct lpfc_pde5));
1635 	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1636 
1637 	/* Endianness conversion if necessary for PDE5 */
1638 	pde5->word0 = cpu_to_le32(pde5->word0);
1639 	pde5->reftag = cpu_to_le32(reftag);
1640 
1641 	/* advance bpl and increment bde count */
1642 	num_bde++;
1643 	bpl++;
1644 	pde6 = (struct lpfc_pde6 *) bpl;
1645 
1646 	/* setup PDE6 with the rest of the info */
1647 	memset(pde6, 0, sizeof(struct lpfc_pde6));
1648 	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1649 	bf_set(pde6_optx, pde6, txop);
1650 	bf_set(pde6_oprx, pde6, rxop);
1651 
1652 	/*
1653 	 * We only need to check the data on READs; for WRITEs
1654 	 * protection data is automatically generated, not checked.
1655 	 */
1656 	if (datadir == DMA_FROM_DEVICE) {
1657 		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1658 			bf_set(pde6_ce, pde6, checking);
1659 		else
1660 			bf_set(pde6_ce, pde6, 0);
1661 
1662 		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1663 			bf_set(pde6_re, pde6, checking);
1664 		else
1665 			bf_set(pde6_re, pde6, 0);
1666 	}
1667 	bf_set(pde6_ai, pde6, 1);
1668 	bf_set(pde6_ae, pde6, 0);
1669 	bf_set(pde6_apptagval, pde6, 0);
1670 
1671 	/* Endianness conversion if necessary for PDE6 */
1672 	pde6->word0 = cpu_to_le32(pde6->word0);
1673 	pde6->word1 = cpu_to_le32(pde6->word1);
1674 	pde6->word2 = cpu_to_le32(pde6->word2);
1675 
1676 	/* advance bpl and increment bde count */
1677 	num_bde++;
1678 	bpl++;
1679 
1680 	/* assumption: caller has already run dma_map_sg on command data */
1681 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1682 		physaddr = sg_dma_address(sgde);
1683 		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1684 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1685 		bpl->tus.f.bdeSize = sg_dma_len(sgde);
1686 		if (datadir == DMA_TO_DEVICE)
1687 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1688 		else
1689 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1690 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
1691 		bpl++;
1692 		num_bde++;
1693 	}
1694 
1695 out:
1696 	return num_bde;
1697 }
1698 
1699 /**
1700  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1701  * @phba: The Hba for which this call is being executed.
1702  * @sc: pointer to scsi command we're working on
1703  * @bpl: pointer to buffer list for protection groups
1704  * @datacnt: number of segments of data that have been dma mapped
1705  * @protcnt: number of segment of protection data that have been dma mapped
1706  *
1707  * This function sets up BPL buffer list for protection groups of
1708  * type LPFC_PG_TYPE_DIF
1709  *
1710  * This is usually used when DIFs are in their own buffers,
1711  * separate from the data. The HBA can then be instructed
1712  * to place the DIFs in the outgoing stream.  For read operations,
1713  * the HBA can extract the DIFs and place them in DIF buffers.
1714  *
1715  * The buffer list for this type consists of one or more of the
1716  * protection groups described below:
1717  *                                    +-------------------------+
1718  *   start of first prot group  -->   |          PDE_5          |
1719  *                                    +-------------------------+
1720  *                                    |          PDE_6          |
1721  *                                    +-------------------------+
1722  *                                    |      PDE_7 (Prot BDE)   |
1723  *                                    +-------------------------+
1724  *                                    |        Data BDE         |
1725  *                                    +-------------------------+
1726  *                                    |more Data BDE's ... (opt)|
1727  *                                    +-------------------------+
1728  *   start of new  prot group  -->    |          PDE_5          |
1729  *                                    +-------------------------+
1730  *                                    |          ...            |
1731  *                                    +-------------------------+
1732  *
1733  * Note: It is assumed that both data and protection s/g buffers have been
1734  *       mapped for DMA
1735  *
1736  * Returns the number of BDEs added to the BPL.
1737  **/
1738 static int
1739 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1740 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
1741 {
1742 	struct scatterlist *sgde = NULL; /* s/g data entry */
1743 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
1744 	struct lpfc_pde5 *pde5 = NULL;
1745 	struct lpfc_pde6 *pde6 = NULL;
1746 	struct lpfc_pde7 *pde7 = NULL;
1747 	dma_addr_t dataphysaddr, protphysaddr;
1748 	unsigned short curr_data = 0, curr_prot = 0;
1749 	unsigned int split_offset;
1750 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1751 	unsigned int protgrp_blks, protgrp_bytes;
1752 	unsigned int remainder, subtotal;
1753 	int status;
1754 	int datadir = sc->sc_data_direction;
1755 	unsigned char pgdone = 0, alldone = 0;
1756 	unsigned blksize;
1757 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1758 	uint32_t rc;
1759 #endif
1760 	uint32_t checking = 1;
1761 	uint32_t reftag;
1762 	uint8_t txop, rxop;
1763 	int num_bde = 0;
1764 
1765 	sgpe = scsi_prot_sglist(sc);
1766 	sgde = scsi_sglist(sc);
1767 
1768 	if (!sgpe || !sgde) {
1769 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1770 				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
1771 				sgpe, sgde);
1772 		return 0;
1773 	}
1774 
1775 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1776 	if (status)
1777 		goto out;
1778 
1779 	/* extract some info from the scsi command */
1780 	blksize = scsi_prot_interval(sc);
1781 	reftag = scsi_prot_ref_tag(sc);
1782 	if (reftag == LPFC_INVALID_REFTAG)
1783 		goto out;
1784 
1785 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1786 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1787 	if (rc) {
1788 		if (rc & BG_ERR_SWAP)
1789 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1790 		if (rc & BG_ERR_CHECK)
1791 			checking = 0;
1792 	}
1793 #endif
1794 
1795 	split_offset = 0;
1796 	do {
1797 		/* Check to see if we ran out of space */
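		/* Returning a count beyond cfg_total_seg_cnt appears to be the
		 * overflow signal to the caller: a PDE5, a PDE6 and at least
		 * one more BDE would still be needed, hence the +3 below.
		 */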
1798 		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1799 			return num_bde + 3;
1800 
1801 		/* setup PDE5 with what we have */
1802 		pde5 = (struct lpfc_pde5 *) bpl;
1803 		memset(pde5, 0, sizeof(struct lpfc_pde5));
1804 		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1805 
1806 		/* Endianness conversion if necessary for PDE5 */
1807 		pde5->word0 = cpu_to_le32(pde5->word0);
1808 		pde5->reftag = cpu_to_le32(reftag);
1809 
1810 		/* advance bpl and increment bde count */
1811 		num_bde++;
1812 		bpl++;
1813 		pde6 = (struct lpfc_pde6 *) bpl;
1814 
1815 		/* setup PDE6 with the rest of the info */
1816 		memset(pde6, 0, sizeof(struct lpfc_pde6));
1817 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1818 		bf_set(pde6_optx, pde6, txop);
1819 		bf_set(pde6_oprx, pde6, rxop);
1820 
1821 		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1822 			bf_set(pde6_ce, pde6, checking);
1823 		else
1824 			bf_set(pde6_ce, pde6, 0);
1825 
1826 		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1827 			bf_set(pde6_re, pde6, checking);
1828 		else
1829 			bf_set(pde6_re, pde6, 0);
1830 
1831 		bf_set(pde6_ai, pde6, 1);
1832 		bf_set(pde6_ae, pde6, 0);
1833 		bf_set(pde6_apptagval, pde6, 0);
1834 
1835 		/* Endianness conversion if necessary for PDE6 */
1836 		pde6->word0 = cpu_to_le32(pde6->word0);
1837 		pde6->word1 = cpu_to_le32(pde6->word1);
1838 		pde6->word2 = cpu_to_le32(pde6->word2);
1839 
1840 		/* advance bpl and increment bde count */
1841 		num_bde++;
1842 		bpl++;
1843 
1844 		/* setup the first BDE that points to protection buffer */
1845 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1846 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1847 
1848 		/* must be integer multiple of the DIF block length */
1849 		BUG_ON(protgroup_len % 8);
1850 
1851 		pde7 = (struct lpfc_pde7 *) bpl;
1852 		memset(pde7, 0, sizeof(struct lpfc_pde7));
1853 		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1854 
1855 		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1856 		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1857 
1858 		protgrp_blks = protgroup_len / 8;
1859 		protgrp_bytes = protgrp_blks * blksize;
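		/* Each T10-DIF tuple is 8 bytes, so e.g. a 4096-byte
		 * protection buffer describes 512 blocks; with a 512-byte
		 * interval that protection group covers 256 KiB of data.
		 */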
1860 
1861 		/* check if this pde is crossing the 4K boundary; if so split */
1862 		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1863 			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1864 			protgroup_offset += protgroup_remainder;
1865 			protgrp_blks = protgroup_remainder / 8;
1866 			protgrp_bytes = protgrp_blks * blksize;
1867 		} else {
1868 			protgroup_offset = 0;
1869 			curr_prot++;
1870 		}
1871 
1872 		num_bde++;
1873 
1874 		/* setup BDE's for data blocks associated with DIF data */
1875 		pgdone = 0;
1876 		subtotal = 0; /* total bytes processed for current prot grp */
1877 		while (!pgdone) {
1878 			/* Check to see if we ran out of space */
1879 			if (num_bde >= phba->cfg_total_seg_cnt)
1880 				return num_bde + 1;
1881 
1882 			if (!sgde) {
1883 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1884 					"9065 BLKGRD:%s Invalid data segment\n",
1885 						__func__);
1886 				return 0;
1887 			}
1888 			bpl++;
1889 			dataphysaddr = sg_dma_address(sgde) + split_offset;
1890 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1891 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1892 
1893 			remainder = sg_dma_len(sgde) - split_offset;
1894 
1895 			if ((subtotal + remainder) <= protgrp_bytes) {
1896 				/* we can use this whole buffer */
1897 				bpl->tus.f.bdeSize = remainder;
1898 				split_offset = 0;
1899 
1900 				if ((subtotal + remainder) == protgrp_bytes)
1901 					pgdone = 1;
1902 			} else {
1903 				/* must split this buffer with next prot grp */
1904 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1905 				split_offset += bpl->tus.f.bdeSize;
1906 			}
1907 
1908 			subtotal += bpl->tus.f.bdeSize;
1909 
1910 			if (datadir == DMA_TO_DEVICE)
1911 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1912 			else
1913 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1914 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
1915 
1916 			num_bde++;
1917 			curr_data++;
1918 
1919 			if (split_offset)
1920 				break;
1921 
1922 			/* Move to the next s/g segment if possible */
1923 			sgde = sg_next(sgde);
1924 
1925 		}
1926 
1927 		if (protgroup_offset) {
1928 			/* update the reference tag */
1929 			reftag += protgrp_blks;
1930 			bpl++;
1931 			continue;
1932 		}
1933 
1934 		/* are we done ? */
1935 		if (curr_prot == protcnt) {
1936 			alldone = 1;
1937 		} else if (curr_prot < protcnt) {
1938 			/* advance to next prot buffer */
1939 			sgpe = sg_next(sgpe);
1940 			bpl++;
1941 
1942 			/* update the reference tag */
1943 			reftag += protgrp_blks;
1944 		} else {
1945 			/* if we're here, we have a bug */
1946 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1947 					"9054 BLKGRD: bug in %s\n", __func__);
1948 		}
1949 
1950 	} while (!alldone);
1951 out:
1952 
1953 	return num_bde;
1954 }
1955 
1956 /**
1957  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1958  * @phba: The Hba for which this call is being executed.
1959  * @sc: pointer to scsi command we're working on
1960  * @sgl: pointer to buffer list for protection groups
1961  * @datasegcnt: number of segments of data that have been dma mapped
1962  * @lpfc_cmd: lpfc scsi command object pointer.
1963  *
1964  * This function sets up SGL buffer list for protection groups of
1965  * type LPFC_PG_TYPE_NO_DIF
1966  *
1967  * This is usually used when the HBA is instructed to generate
1968  * DIFs and insert them into the data stream (or strip DIFs from
1969  * the incoming data stream).
1970  *
1971  * The buffer list consists of just one protection group described
1972  * below:
1973  *                                +-------------------------+
1974  *   start of prot group  -->     |         DI_SEED         |
1975  *                                +-------------------------+
1976  *                                |         Data SGE        |
1977  *                                +-------------------------+
1978  *                                |more Data SGE's ... (opt)|
1979  *                                +-------------------------+
1980  *
1981  *
1982  * Note: Data s/g buffers have been dma mapped
1983  *
1984  * Returns the number of SGEs added to the SGL.
1985  **/
1986 static int
1987 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1988 		struct sli4_sge *sgl, int datasegcnt,
1989 		struct lpfc_io_buf *lpfc_cmd)
1990 {
1991 	struct scatterlist *sgde = NULL; /* s/g data entry */
1992 	struct sli4_sge_diseed *diseed = NULL;
1993 	dma_addr_t physaddr;
1994 	int i = 0, num_sge = 0, status;
1995 	uint32_t reftag;
1996 	uint8_t txop, rxop;
1997 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1998 	uint32_t rc;
1999 #endif
2000 	uint32_t checking = 1;
2001 	uint32_t dma_len;
2002 	uint32_t dma_offset = 0;
2003 	struct sli4_hybrid_sgl *sgl_xtra = NULL;
2004 	int j;
2005 	bool lsp_just_set = false;
2006 
2007 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2008 	if (status)
2009 		goto out;
2010 
2011 	/* extract some info from the scsi command for pde */
2012 	reftag = scsi_prot_ref_tag(sc);
2013 	if (reftag == LPFC_INVALID_REFTAG)
2014 		goto out;
2015 
2016 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2017 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2018 	if (rc) {
2019 		if (rc & BG_ERR_SWAP)
2020 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2021 		if (rc & BG_ERR_CHECK)
2022 			checking = 0;
2023 	}
2024 #endif
2025 
2026 	/* setup DISEED with what we have */
2027 	diseed = (struct sli4_sge_diseed *) sgl;
2028 	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2029 	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2030 
2031 	/* Endianness conversion if necessary */
2032 	diseed->ref_tag = cpu_to_le32(reftag);
2033 	diseed->ref_tag_tran = diseed->ref_tag;
2034 
2035 	/*
2036 	 * We only need to check the data on READs; for WRITEs
2037 	 * protection data is automatically generated, not checked.
2038 	 */
2039 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2040 		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
2041 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2042 		else
2043 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2044 
2045 		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2046 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2047 		else
2048 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2049 	}
2050 
2051 	/* setup DISEED with the rest of the info */
2052 	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2053 	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2054 
2055 	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2056 	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2057 
2058 	/* Endianness conversion if necessary for DISEED */
2059 	diseed->word2 = cpu_to_le32(diseed->word2);
2060 	diseed->word3 = cpu_to_le32(diseed->word3);
2061 
2062 	/* advance bpl and increment sge count */
2063 	num_sge++;
2064 	sgl++;
2065 
2066 	/* assumption: caller has already run dma_map_sg on command data */
2067 	sgde = scsi_sglist(sc);
2068 	j = 3;
2069 	for (i = 0; i < datasegcnt; i++) {
2070 		/* clear it */
2071 		sgl->word2 = 0;
2072 
2073 		/* do we need to expand the segment */
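		/* j counts SGE slots used in the current SGL page; it starts
		 * at 3, presumably for the FCP_CMND, FCP_RSP and DISEED
		 * entries already placed.  When the next data SGE would land
		 * on the last slot of the page (border_sge_num) and more
		 * segments remain, an LSP SGE is written instead to chain to
		 * an extra per-hardware-queue SGL buffer.
		 */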
2074 		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2075 		    ((datasegcnt - 1) != i)) {
2076 			/* set LSP type */
2077 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2078 
2079 			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2080 
2081 			if (unlikely(!sgl_xtra)) {
2082 				lpfc_cmd->seg_cnt = 0;
2083 				return 0;
2084 			}
2085 			sgl->addr_lo = cpu_to_le32(putPaddrLow(
2086 						sgl_xtra->dma_phys_sgl));
2087 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2088 						sgl_xtra->dma_phys_sgl));
2089 
2090 		} else {
2091 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2092 		}
2093 
2094 		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2095 			if ((datasegcnt - 1) == i)
2096 				bf_set(lpfc_sli4_sge_last, sgl, 1);
2097 			physaddr = sg_dma_address(sgde);
2098 			dma_len = sg_dma_len(sgde);
2099 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2100 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2101 
2102 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2103 			sgl->word2 = cpu_to_le32(sgl->word2);
2104 			sgl->sge_len = cpu_to_le32(dma_len);
2105 
2106 			dma_offset += dma_len;
2107 			sgde = sg_next(sgde);
2108 
2109 			sgl++;
2110 			num_sge++;
2111 			lsp_just_set = false;
2112 
2113 		} else {
2114 			sgl->word2 = cpu_to_le32(sgl->word2);
2115 			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2116 
2117 			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2118 			i = i - 1;
2119 
2120 			lsp_just_set = true;
2121 		}
2122 
2123 		j++;
2124 
2125 	}
2126 
2127 out:
2128 	return num_sge;
2129 }
2130 
2131 /**
2132  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2133  * @phba: The Hba for which this call is being executed.
2134  * @sc: pointer to scsi command we're working on
2135  * @sgl: pointer to buffer list for protection groups
2136  * @datacnt: number of segments of data that have been dma mapped
2137  * @protcnt: number of segment of protection data that have been dma mapped
2138  * @lpfc_cmd: lpfc scsi command object pointer.
2139  *
2140  * This function sets up SGL buffer list for protection groups of
2141  * type LPFC_PG_TYPE_DIF
2142  *
2143  * This is usually used when DIFs are in their own buffers,
2144  * separate from the data. The HBA can then be instructed
2145  * to place the DIFs in the outgoing stream.  For read operations,
2146  * the HBA can extract the DIFs and place them in DIF buffers.
2147  *
2148  * The buffer list for this type consists of one or more of the
2149  * protection groups described below:
2150  *                                    +-------------------------+
2151  *   start of first prot group  -->   |         DISEED          |
2152  *                                    +-------------------------+
2153  *                                    |      DIF (Prot SGE)     |
2154  *                                    +-------------------------+
2155  *                                    |        Data SGE         |
2156  *                                    +-------------------------+
2157  *                                    |more Data SGE's ... (opt)|
2158  *                                    +-------------------------+
2159  *   start of new  prot group  -->    |         DISEED          |
2160  *                                    +-------------------------+
2161  *                                    |          ...            |
2162  *                                    +-------------------------+
2163  *
2164  * Note: It is assumed that both data and protection s/g buffers have been
2165  *       mapped for DMA
2166  *
2167  * Returns the number of SGEs added to the SGL.
2168  **/
2169 static int
2170 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2171 		struct sli4_sge *sgl, int datacnt, int protcnt,
2172 		struct lpfc_io_buf *lpfc_cmd)
2173 {
2174 	struct scatterlist *sgde = NULL; /* s/g data entry */
2175 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2176 	struct sli4_sge_diseed *diseed = NULL;
2177 	dma_addr_t dataphysaddr, protphysaddr;
2178 	unsigned short curr_data = 0, curr_prot = 0;
2179 	unsigned int split_offset;
2180 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2181 	unsigned int protgrp_blks, protgrp_bytes;
2182 	unsigned int remainder, subtotal;
2183 	int status;
2184 	unsigned char pgdone = 0, alldone = 0;
2185 	unsigned blksize;
2186 	uint32_t reftag;
2187 	uint8_t txop, rxop;
2188 	uint32_t dma_len;
2189 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2190 	uint32_t rc;
2191 #endif
2192 	uint32_t checking = 1;
2193 	uint32_t dma_offset = 0;
2194 	int num_sge = 0, j = 2;
2195 	struct sli4_hybrid_sgl *sgl_xtra = NULL;
2196 
2197 	sgpe = scsi_prot_sglist(sc);
2198 	sgde = scsi_sglist(sc);
2199 
2200 	if (!sgpe || !sgde) {
2201 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2202 				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
2203 				sgpe, sgde);
2204 		return 0;
2205 	}
2206 
2207 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2208 	if (status)
2209 		goto out;
2210 
2211 	/* extract some info from the scsi command */
2212 	blksize = scsi_prot_interval(sc);
2213 	reftag = scsi_prot_ref_tag(sc);
2214 	if (reftag == LPFC_INVALID_REFTAG)
2215 		goto out;
2216 
2217 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2218 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2219 	if (rc) {
2220 		if (rc & BG_ERR_SWAP)
2221 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2222 		if (rc & BG_ERR_CHECK)
2223 			checking = 0;
2224 	}
2225 #endif
2226 
2227 	split_offset = 0;
2228 	do {
2229 		/* Check to see if we ran out of space */
2230 		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2231 		    !(phba->cfg_xpsgl))
2232 			return num_sge + 3;
2233 
2234 		/* DISEED and DIF have to be together */
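		/* If placing the DISEED, DIF and first data SGE would
		 * straddle the end of the current SGL page, chain to a new
		 * page first so the three stay together (this is what the
		 * three modulo tests below appear to enforce).
		 */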
2235 		if (!((j + 1) % phba->border_sge_num) ||
2236 		    !((j + 2) % phba->border_sge_num) ||
2237 		    !((j + 3) % phba->border_sge_num)) {
2238 			sgl->word2 = 0;
2239 
2240 			/* set LSP type */
2241 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2242 
2243 			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2244 
2245 			if (unlikely(!sgl_xtra)) {
2246 				goto out;
2247 			} else {
2248 				sgl->addr_lo = cpu_to_le32(putPaddrLow(
2249 						sgl_xtra->dma_phys_sgl));
2250 				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2251 						       sgl_xtra->dma_phys_sgl));
2252 			}
2253 
2254 			sgl->word2 = cpu_to_le32(sgl->word2);
2255 			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2256 
2257 			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2258 			j = 0;
2259 		}
2260 
2261 		/* setup DISEED with what we have */
2262 		diseed = (struct sli4_sge_diseed *) sgl;
2263 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2264 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2265 
2266 		/* Endianness conversion if necessary */
2267 		diseed->ref_tag = cpu_to_le32(reftag);
2268 		diseed->ref_tag_tran = diseed->ref_tag;
2269 
2270 		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
2271 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2272 		} else {
2273 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2274 			/*
2275 			 * When in this mode, the hardware will replace
2276 			 * the guard tag from the host with a
2277 			 * newly generated good CRC for the wire.
2278 			 * Switch to raw mode here to avoid this
2279 			 * behavior. What the host sends gets put on the wire.
2280 			 */
2281 			if (txop == BG_OP_IN_CRC_OUT_CRC) {
2282 				txop = BG_OP_RAW_MODE;
2283 				rxop = BG_OP_RAW_MODE;
2284 			}
2285 		}
2286 
2287 
2288 		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2289 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2290 		else
2291 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2292 
2293 		/* setup DISEED with the rest of the info */
2294 		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2295 		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2296 
2297 		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2298 		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2299 
2300 		/* Endianness conversion if necessary for DISEED */
2301 		diseed->word2 = cpu_to_le32(diseed->word2);
2302 		diseed->word3 = cpu_to_le32(diseed->word3);
2303 
2304 		/* advance sgl and increment bde count */
2305 		num_sge++;
2306 
2307 		sgl++;
2308 		j++;
2309 
2310 		/* setup the first BDE that points to protection buffer */
2311 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2312 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2313 
2314 		/* must be integer multiple of the DIF block length */
2315 		BUG_ON(protgroup_len % 8);
2316 
2317 		/* Now setup DIF SGE */
2318 		sgl->word2 = 0;
2319 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2320 		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2321 		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2322 		sgl->word2 = cpu_to_le32(sgl->word2);
2323 		sgl->sge_len = 0;
2324 
2325 		protgrp_blks = protgroup_len / 8;
2326 		protgrp_bytes = protgrp_blks * blksize;
2327 
2328 		/* check if DIF SGE is crossing the 4K boundary; if so split */
2329 		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2330 			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2331 			protgroup_offset += protgroup_remainder;
2332 			protgrp_blks = protgroup_remainder / 8;
2333 			protgrp_bytes = protgrp_blks * blksize;
2334 		} else {
2335 			protgroup_offset = 0;
2336 			curr_prot++;
2337 		}
2338 
2339 		num_sge++;
2340 
2341 		/* setup SGE's for data blocks associated with DIF data */
2342 		pgdone = 0;
2343 		subtotal = 0; /* total bytes processed for current prot grp */
2344 
2345 		sgl++;
2346 		j++;
2347 
2348 		while (!pgdone) {
2349 			/* Check to see if we ran out of space */
2350 			if ((num_sge >= phba->cfg_total_seg_cnt) &&
2351 			    !phba->cfg_xpsgl)
2352 				return num_sge + 1;
2353 
2354 			if (!sgde) {
2355 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2356 					"9086 BLKGRD:%s Invalid data segment\n",
2357 						__func__);
2358 				return 0;
2359 			}
2360 
2361 			if (!((j + 1) % phba->border_sge_num)) {
2362 				sgl->word2 = 0;
2363 
2364 				/* set LSP type */
2365 				bf_set(lpfc_sli4_sge_type, sgl,
2366 				       LPFC_SGE_TYPE_LSP);
2367 
2368 				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2369 								 lpfc_cmd);
2370 
2371 				if (unlikely(!sgl_xtra)) {
2372 					goto out;
2373 				} else {
2374 					sgl->addr_lo = cpu_to_le32(
2375 					  putPaddrLow(sgl_xtra->dma_phys_sgl));
2376 					sgl->addr_hi = cpu_to_le32(
2377 					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
2378 				}
2379 
2380 				sgl->word2 = cpu_to_le32(sgl->word2);
2381 				sgl->sge_len = cpu_to_le32(
2382 						     phba->cfg_sg_dma_buf_size);
2383 
2384 				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2385 			} else {
2386 				dataphysaddr = sg_dma_address(sgde) +
2387 								   split_offset;
2388 
2389 				remainder = sg_dma_len(sgde) - split_offset;
2390 
2391 				if ((subtotal + remainder) <= protgrp_bytes) {
2392 					/* we can use this whole buffer */
2393 					dma_len = remainder;
2394 					split_offset = 0;
2395 
2396 					if ((subtotal + remainder) ==
2397 								  protgrp_bytes)
2398 						pgdone = 1;
2399 				} else {
2400 					/* must split this buffer with next
2401 					 * prot grp
2402 					 */
2403 					dma_len = protgrp_bytes - subtotal;
2404 					split_offset += dma_len;
2405 				}
2406 
2407 				subtotal += dma_len;
2408 
2409 				sgl->word2 = 0;
2410 				sgl->addr_lo = cpu_to_le32(putPaddrLow(
2411 								 dataphysaddr));
2412 				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2413 								 dataphysaddr));
2414 				bf_set(lpfc_sli4_sge_last, sgl, 0);
2415 				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2416 				bf_set(lpfc_sli4_sge_type, sgl,
2417 				       LPFC_SGE_TYPE_DATA);
2418 
2419 				sgl->sge_len = cpu_to_le32(dma_len);
2420 				dma_offset += dma_len;
2421 
2422 				num_sge++;
2423 				curr_data++;
2424 
2425 				if (split_offset) {
2426 					sgl++;
2427 					j++;
2428 					break;
2429 				}
2430 
2431 				/* Move to the next s/g segment if possible */
2432 				sgde = sg_next(sgde);
2433 
2434 				sgl++;
2435 			}
2436 
2437 			j++;
2438 		}
2439 
2440 		if (protgroup_offset) {
2441 			/* update the reference tag */
2442 			reftag += protgrp_blks;
2443 			continue;
2444 		}
2445 
2446 		/* are we done ? */
2447 		if (curr_prot == protcnt) {
2448 			/* mark the last SGL */
2449 			sgl--;
2450 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2451 			alldone = 1;
2452 		} else if (curr_prot < protcnt) {
2453 			/* advance to next prot buffer */
2454 			sgpe = sg_next(sgpe);
2455 
2456 			/* update the reference tag */
2457 			reftag += protgrp_blks;
2458 		} else {
2459 			/* if we're here, we have a bug */
2460 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2461 					"9085 BLKGRD: bug in %s\n", __func__);
2462 		}
2463 
2464 	} while (!alldone);
2465 
2466 out:
2467 
2468 	return num_sge;
2469 }
2470 
2471 /**
2472  * lpfc_prot_group_type - Get protection group type of SCSI command
2473  * @phba: The Hba for which this call is being executed.
2474  * @sc: pointer to scsi command we're working on
2475  *
2476  * Given a SCSI command that supports DIF, determine composition of protection
2477  * groups involved in setting up buffer lists
2478  *
2479  * Returns: Protection group type (with or without DIF)
2480  *
2481  **/
2482 static int
2483 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2484 {
2485 	int ret = LPFC_PG_TYPE_INVALID;
2486 	unsigned char op = scsi_get_prot_op(sc);
2487 
2488 	switch (op) {
2489 	case SCSI_PROT_READ_STRIP:
2490 	case SCSI_PROT_WRITE_INSERT:
2491 		ret = LPFC_PG_TYPE_NO_DIF;
2492 		break;
2493 	case SCSI_PROT_READ_INSERT:
2494 	case SCSI_PROT_WRITE_STRIP:
2495 	case SCSI_PROT_READ_PASS:
2496 	case SCSI_PROT_WRITE_PASS:
2497 		ret = LPFC_PG_TYPE_DIF_BUF;
2498 		break;
2499 	default:
2500 		if (phba)
2501 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2502 					"9021 Unsupported protection op:%d\n",
2503 					op);
2504 		break;
2505 	}
2506 	return ret;
2507 }
2508 
2509 /**
2510  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2511  * @phba: The Hba for which this call is being executed.
2512  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2513  *
2514  * Adjust the data length to account for how much data
2515  * is actually on the wire.
2516  *
2517  * returns the adjusted data length
2518  **/
2519 static int
2520 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2521 		       struct lpfc_io_buf *lpfc_cmd)
2522 {
2523 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2524 	int fcpdl;
2525 
2526 	fcpdl = scsi_bufflen(sc);
2527 
2528 	/* Check if there is protection data on the wire */
2529 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2530 		/* Read check for protection data */
2531 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2532 			return fcpdl;
2533 
2534 	} else {
2535 		/* Write check for protection data */
2536 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2537 			return fcpdl;
2538 	}
2539 
2540 	/*
2541 	 * If we are in DIF Type 1 mode every data block has an 8 byte
2542 	 * DIF (trailer) attached to it. Must adjust FCP data length
2543 	 * to account for the protection data.
2544 	 */
2545 	fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
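	/* e.g. a 64 KiB request with a 512-byte protection interval gains
	 * (65536 / 512) * 8 = 1024 bytes, so fcpdl becomes 66560.
	 */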
2546 
2547 	return fcpdl;
2548 }
2549 
2550 /**
2551  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2552  * @phba: The Hba for which this call is being executed.
2553  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2554  *
2555  * This is the protection/DIF aware version of
2556  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2557  * two functions eventually, but for now, it's here.
2558  * RETURNS 0 - SUCCESS,
2559  *         1 - Failed DMA map, retry.
2560  *         2 - Invalid scsi cmd or prot-type. Do not retry.
2561  **/
2562 static int
2563 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2564 		struct lpfc_io_buf *lpfc_cmd)
2565 {
2566 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2567 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2568 	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2569 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2570 	uint32_t num_bde = 0;
2571 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2572 	int prot_group_type = 0;
2573 	int fcpdl;
2574 	int ret = 1;
2575 	struct lpfc_vport *vport = phba->pport;
2576 
2577 	/*
2578 	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2579 	 * and fcp_rsp regions to the first data bde entry
2580 	 */
2581 	bpl += 2;
2582 	if (scsi_sg_count(scsi_cmnd)) {
2583 		/*
2584 		 * The driver stores the segment count returned from dma_map_sg
2585 		 * because this is a count of dma-mappings used to map the use_sg
2586 		 * pages.  They are not guaranteed to be the same for those
2587 		 * architectures that implement an IOMMU.
2588 		 */
2589 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
2590 					scsi_sglist(scsi_cmnd),
2591 					scsi_sg_count(scsi_cmnd), datadir);
2592 		if (unlikely(!datasegcnt))
2593 			return 1;
2594 
2595 		lpfc_cmd->seg_cnt = datasegcnt;
2596 
2597 		/* First check if data segment count from SCSI Layer is good */
2598 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2599 			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2600 			ret = 2;
2601 			goto err;
2602 		}
2603 
2604 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2605 
2606 		switch (prot_group_type) {
2607 		case LPFC_PG_TYPE_NO_DIF:
2608 
2609 			/* Here we need to add a PDE5 and PDE6 to the count */
2610 			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2611 				ret = 2;
2612 				goto err;
2613 			}
2614 
2615 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2616 					datasegcnt);
2617 			/* we should have 2 or more entries in buffer list */
2618 			if (num_bde < 2) {
2619 				ret = 2;
2620 				goto err;
2621 			}
2622 			break;
2623 
2624 		case LPFC_PG_TYPE_DIF_BUF:
2625 			/*
2626 			 * This type indicates that protection buffers are
2627 			 * passed to the driver, so that needs to be prepared
2628 			 * for DMA
2629 			 */
2630 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
2631 					scsi_prot_sglist(scsi_cmnd),
2632 					scsi_prot_sg_count(scsi_cmnd), datadir);
2633 			if (unlikely(!protsegcnt)) {
2634 				scsi_dma_unmap(scsi_cmnd);
2635 				return 1;
2636 			}
2637 
2638 			lpfc_cmd->prot_seg_cnt = protsegcnt;
2639 
2640 			/*
2641 			 * There is a minimum of 4 BPLs used for every
2642 			 * protection data segment.
2643 			 */
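			/* (Presumably PDE5 + PDE6 + PDE7 + at least one data
			 * BDE per protection group.)
			 */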
2644 			if ((lpfc_cmd->prot_seg_cnt * 4) >
2645 			    (phba->cfg_total_seg_cnt - 2)) {
2646 				ret = 2;
2647 				goto err;
2648 			}
2649 
2650 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2651 					datasegcnt, protsegcnt);
2652 			/* we should have 3 or more entries in buffer list */
2653 			if ((num_bde < 3) ||
2654 			    (num_bde > phba->cfg_total_seg_cnt)) {
2655 				ret = 2;
2656 				goto err;
2657 			}
2658 			break;
2659 
2660 		case LPFC_PG_TYPE_INVALID:
2661 		default:
2662 			scsi_dma_unmap(scsi_cmnd);
2663 			lpfc_cmd->seg_cnt = 0;
2664 
2665 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2666 					"9022 Unexpected protection group %i\n",
2667 					prot_group_type);
2668 			return 2;
2669 		}
2670 	}
2671 
2672 	/*
2673 	 * Finish initializing those IOCB fields that are dependent on the
2674 	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2675 	 * reinitialized since all iocb memory resources are used many times
2676 	 * for transmit, receive, and continuation bpl's.
2677 	 */
2678 	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2679 	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2680 	iocb_cmd->ulpBdeCount = 1;
2681 	iocb_cmd->ulpLe = 1;
2682 
2683 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2684 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2685 
2686 	/*
2687 	 * Due to difference in data length between DIF/non-DIF paths,
2688 	 * we need to set word 4 of IOCB here
2689 	 */
2690 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2691 
2692 	/*
2693 	 * For First burst, we may need to adjust the initial transfer
2694 	 * length for DIF
2695 	 */
2696 	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2697 	    (fcpdl < vport->cfg_first_burst_size))
2698 		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2699 
2700 	return 0;
2701 err:
2702 	if (lpfc_cmd->seg_cnt)
2703 		scsi_dma_unmap(scsi_cmnd);
2704 	if (lpfc_cmd->prot_seg_cnt)
2705 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2706 			     scsi_prot_sg_count(scsi_cmnd),
2707 			     scsi_cmnd->sc_data_direction);
2708 
2709 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2710 			"9023 Cannot setup S/G List for HBA "
2711 			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2712 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2713 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2714 			prot_group_type, num_bde);
2715 
2716 	lpfc_cmd->seg_cnt = 0;
2717 	lpfc_cmd->prot_seg_cnt = 0;
2718 	return ret;
2719 }
2720 
2721 /*
2722  * This function calculates the T10 DIF guard tag
2723  * on the specified data using the CRC algorithm
2724  * implemented by crc_t10dif.
2725  */
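/*
 * The tag is returned big-endian (cpu_to_be16) so it can be compared
 * directly against the guard_tag field of struct scsi_dif_tuple.
 */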
2726 static uint16_t
2727 lpfc_bg_crc(uint8_t *data, int count)
2728 {
2729 	uint16_t crc = 0;
2730 	uint16_t x;
2731 
2732 	crc = crc_t10dif(data, count);
2733 	x = cpu_to_be16(crc);
2734 	return x;
2735 }
2736 
2737 /*
2738  * This function calculates the T10 DIF guard tag
2739  * on the specified data using the IP checksum algorithm
2740  * implemented by ip_compute_csum.
2741  */
2742 static uint16_t
2743 lpfc_bg_csum(uint8_t *data, int count)
2744 {
2745 	uint16_t ret;
2746 
2747 	ret = ip_compute_csum(data, count);
2748 	return ret;
2749 }
2750 
2751 /*
2752  * This function examines the protection data to try to determine
2753  * what type of T10-DIF error occurred.
2754  */
2755 static void
2756 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2757 {
2758 	struct scatterlist *sgpe; /* s/g prot entry */
2759 	struct scatterlist *sgde; /* s/g data entry */
2760 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2761 	struct scsi_dif_tuple *src = NULL;
2762 	uint8_t *data_src = NULL;
2763 	uint16_t guard_tag;
2764 	uint16_t start_app_tag, app_tag;
2765 	uint32_t start_ref_tag, ref_tag;
2766 	int prot, protsegcnt;
2767 	int err_type, len, data_len;
2768 	int chk_ref, chk_app, chk_guard;
2769 	uint16_t sum;
2770 	unsigned blksize;
2771 
2772 	err_type = BGS_GUARD_ERR_MASK;
2773 	sum = 0;
2774 	guard_tag = 0;
2775 
2776 	/* First check to see if there is protection data to examine */
2777 	prot = scsi_get_prot_op(cmd);
2778 	if ((prot == SCSI_PROT_READ_STRIP) ||
2779 	    (prot == SCSI_PROT_WRITE_INSERT) ||
2780 	    (prot == SCSI_PROT_NORMAL))
2781 		goto out;
2782 
2783 	/* Currently the driver just supports ref_tag and guard_tag checking */
2784 	chk_ref = 1;
2785 	chk_app = 0;
2786 	chk_guard = 0;
2787 
2788 	/* Setup a ptr to the protection data provided by the SCSI host */
2789 	sgpe = scsi_prot_sglist(cmd);
2790 	protsegcnt = lpfc_cmd->prot_seg_cnt;
2791 
2792 	if (sgpe && protsegcnt) {
2793 
2794 		/*
2795 		 * We will only try to verify guard tag if the segment
2796 		 * data length is a multiple of the blksize.
2797 		 */
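		/* e.g. a 4096-byte segment with a 512-byte interval covers
		 * exactly 8 blocks and can be guard-checked; a 4100-byte
		 * segment cannot.
		 */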
2798 		sgde = scsi_sglist(cmd);
2799 		blksize = scsi_prot_interval(cmd);
2800 		data_src = (uint8_t *)sg_virt(sgde);
2801 		data_len = sgde->length;
2802 		if ((data_len & (blksize - 1)) == 0)
2803 			chk_guard = 1;
2804 
2805 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2806 		start_ref_tag = scsi_prot_ref_tag(cmd);
2807 		if (start_ref_tag == LPFC_INVALID_REFTAG)
2808 			goto out;
2809 		start_app_tag = src->app_tag;
2810 		len = sgpe->length;
2811 		while (src && protsegcnt) {
2812 			while (len) {
2813 
2814 				/*
2815 				 * First check to see if a protection data
2816 				 * check is valid
2817 				 */
2818 				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2819 				    (src->app_tag == T10_PI_APP_ESCAPE)) {
2820 					start_ref_tag++;
2821 					goto skipit;
2822 				}
2823 
2824 				/* First Guard Tag checking */
2825 				if (chk_guard) {
2826 					guard_tag = src->guard_tag;
2827 					if (cmd->prot_flags
2828 					    & SCSI_PROT_IP_CHECKSUM)
2829 						sum = lpfc_bg_csum(data_src,
2830 								   blksize);
2831 					else
2832 						sum = lpfc_bg_crc(data_src,
2833 								  blksize);
2834 					if ((guard_tag != sum)) {
2835 						err_type = BGS_GUARD_ERR_MASK;
2836 						goto out;
2837 					}
2838 				}
2839 
2840 				/* Reference Tag checking */
2841 				ref_tag = be32_to_cpu(src->ref_tag);
2842 				if (chk_ref && (ref_tag != start_ref_tag)) {
2843 					err_type = BGS_REFTAG_ERR_MASK;
2844 					goto out;
2845 				}
2846 				start_ref_tag++;
2847 
2848 				/* App Tag checking */
2849 				app_tag = src->app_tag;
2850 				if (chk_app && (app_tag != start_app_tag)) {
2851 					err_type = BGS_APPTAG_ERR_MASK;
2852 					goto out;
2853 				}
2854 skipit:
2855 				len -= sizeof(struct scsi_dif_tuple);
2856 				if (len < 0)
2857 					len = 0;
2858 				src++;
2859 
2860 				data_src += blksize;
2861 				data_len -= blksize;
2862 
2863 				/*
2864 				 * Are we at the end of the Data segment?
2865 				 * The data segment is only used for Guard
2866 				 * tag checking.
2867 				 */
2868 				if (chk_guard && (data_len == 0)) {
2869 					chk_guard = 0;
2870 					sgde = sg_next(sgde);
2871 					if (!sgde)
2872 						goto out;
2873 
2874 					data_src = (uint8_t *)sg_virt(sgde);
2875 					data_len = sgde->length;
2876 					if ((data_len & (blksize - 1)) == 0)
2877 						chk_guard = 1;
2878 				}
2879 			}
2880 
2881 			/* Goto the next Protection data segment */
2882 			sgpe = sg_next(sgpe);
2883 			if (sgpe) {
2884 				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2885 				len = sgpe->length;
2886 			} else {
2887 				src = NULL;
2888 			}
2889 			protsegcnt--;
2890 		}
2891 	}
2892 out:
2893 	if (err_type == BGS_GUARD_ERR_MASK) {
2894 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2895 		set_host_byte(cmd, DID_ABORT);
2896 		phba->bg_guard_err_cnt++;
2897 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2898 				"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2899 				scsi_prot_ref_tag(cmd),
2900 				sum, guard_tag);
2901 
2902 	} else if (err_type == BGS_REFTAG_ERR_MASK) {
2903 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2904 		set_host_byte(cmd, DID_ABORT);
2905 
2906 		phba->bg_reftag_err_cnt++;
2907 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2908 				"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2909 				scsi_prot_ref_tag(cmd),
2910 				ref_tag, start_ref_tag);
2911 
2912 	} else if (err_type == BGS_APPTAG_ERR_MASK) {
2913 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2914 		set_host_byte(cmd, DID_ABORT);
2915 
2916 		phba->bg_apptag_err_cnt++;
2917 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2918 				"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2919 				scsi_prot_ref_tag(cmd),
2920 				app_tag, start_app_tag);
2921 	}
2922 }
2923 
2924 /*
2925  * This function checks for BlockGuard errors detected by
2926  * the HBA.  In case of errors, the ASC/ASCQ fields in the
2927  * sense buffer will be set accordingly, paired with
2928  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2929  * detected corruption.
2930  *
2931  * Returns:
2932  *  0 - No error found
2933  *  1 - BlockGuard error found
2934  * -1 - Internal error (bad profile, ...etc)
2935  */
2936 static int
2937 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2938 		  struct lpfc_iocbq *pIocbOut)
2939 {
2940 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2941 	struct sli3_bg_fields *bgf;
2942 	int ret = 0;
2943 	struct lpfc_wcqe_complete *wcqe;
2944 	u32 status;
2945 	u32 bghm = 0;
2946 	u32 bgstat = 0;
2947 	u64 failing_sector = 0;
2948 
2949 	if (phba->sli_rev == LPFC_SLI_REV4) {
2950 		wcqe = &pIocbOut->wcqe_cmpl;
2951 		status = bf_get(lpfc_wcqe_c_status, wcqe);
2952 
2953 		if (status == CQE_STATUS_DI_ERROR) {
2954 			/* Guard Check failed */
2955 			if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
2956 				bgstat |= BGS_GUARD_ERR_MASK;
2957 
2958 			/* AppTag Check failed */
2959 			if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
2960 				bgstat |= BGS_APPTAG_ERR_MASK;
2961 
2962 			/* RefTag Check failed */
2963 			if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
2964 				bgstat |= BGS_REFTAG_ERR_MASK;
2965 
2966 			/* Check to see if there was any good data before the
2967 			 * error
2968 			 */
2969 			if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2970 				bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2971 				bghm = wcqe->total_data_placed;
2972 			}
2973 
2974 			/*
2975 			 * Set ALL the error bits to indicate we don't know what
2976 			 * type of error it is.
2977 			 */
2978 			if (!bgstat)
2979 				bgstat |= (BGS_REFTAG_ERR_MASK |
2980 					   BGS_APPTAG_ERR_MASK |
2981 					   BGS_GUARD_ERR_MASK);
2982 		}
2983 
2984 	} else {
2985 		bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2986 		bghm = bgf->bghm;
2987 		bgstat = bgf->bgstat;
2988 	}
2989 
2990 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
2991 		cmd->result = DID_ERROR << 16;
2992 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2993 				"9072 BLKGRD: Invalid BG Profile in cmd "
2994 				"0x%x reftag 0x%x blk cnt 0x%x "
2995 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2996 				scsi_prot_ref_tag(cmd),
2997 				scsi_logical_block_count(cmd), bgstat, bghm);
2998 		ret = (-1);
2999 		goto out;
3000 	}
3001 
3002 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3003 		cmd->result = DID_ERROR << 16;
3004 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3005 				"9073 BLKGRD: Invalid BG PDIF Block in cmd "
3006 				"0x%x reftag 0x%x blk cnt 0x%x "
3007 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3008 				scsi_prot_ref_tag(cmd),
3009 				scsi_logical_block_count(cmd), bgstat, bghm);
3010 		ret = (-1);
3011 		goto out;
3012 	}
3013 
3014 	if (lpfc_bgs_get_guard_err(bgstat)) {
3015 		ret = 1;
3016 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3017 		set_host_byte(cmd, DID_ABORT);
3018 		phba->bg_guard_err_cnt++;
3019 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3020 				"9055 BLKGRD: Guard Tag error in cmd "
3021 				"0x%x reftag 0x%x blk cnt 0x%x "
3022 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3023 				scsi_prot_ref_tag(cmd),
3024 				scsi_logical_block_count(cmd), bgstat, bghm);
3025 	}
3026 
3027 	if (lpfc_bgs_get_reftag_err(bgstat)) {
3028 		ret = 1;
3029 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3030 		set_host_byte(cmd, DID_ABORT);
3031 		phba->bg_reftag_err_cnt++;
3032 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3033 				"9056 BLKGRD: Ref Tag error in cmd "
3034 				"0x%x reftag 0x%x blk cnt 0x%x "
3035 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3036 				scsi_prot_ref_tag(cmd),
3037 				scsi_logical_block_count(cmd), bgstat, bghm);
3038 	}
3039 
3040 	if (lpfc_bgs_get_apptag_err(bgstat)) {
3041 		ret = 1;
3042 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3043 		set_host_byte(cmd, DID_ABORT);
3044 		phba->bg_apptag_err_cnt++;
3045 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3046 				"9061 BLKGRD: App Tag error in cmd "
3047 				"0x%x reftag 0x%x blk cnt 0x%x "
3048 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3049 				scsi_prot_ref_tag(cmd),
3050 				scsi_logical_block_count(cmd), bgstat, bghm);
3051 	}
3052 
3053 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3054 		/*
3055 		 * setup sense data descriptor 0 per SPC-4 as an information
3056 		 * field, and put the failing LBA in it.
3057 		 * This code assumes there was also a guard/app/ref tag error
3058 		 * indication.
3059 		 */
3060 		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3061 		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3062 		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3063 		cmd->sense_buffer[10] = 0x80; /* Validity bit */
3064 
3065 		/* bghm is a "on the wire" FC frame based count */
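		/* Convert the byte count to a block count using the on-wire
		 * block size: bare sectors when the HBA inserts/strips DIF,
		 * or sector plus 8-byte tuple (e.g. 512 + 8 = 520) when the
		 * protection data travels with the data.
		 */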
3066 		switch (scsi_get_prot_op(cmd)) {
3067 		case SCSI_PROT_READ_INSERT:
3068 		case SCSI_PROT_WRITE_STRIP:
3069 			bghm /= cmd->device->sector_size;
3070 			break;
3071 		case SCSI_PROT_READ_STRIP:
3072 		case SCSI_PROT_WRITE_INSERT:
3073 		case SCSI_PROT_READ_PASS:
3074 		case SCSI_PROT_WRITE_PASS:
3075 			bghm /= (cmd->device->sector_size +
3076 				sizeof(struct scsi_dif_tuple));
3077 			break;
3078 		}
3079 
3080 		failing_sector = scsi_get_lba(cmd);
3081 		failing_sector += bghm;
3082 
3083 		/* Descriptor Information */
3084 		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3085 	}
3086 
3087 	if (!ret) {
3088 		/* No error was reported - problem in FW? */
3089 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3090 				"9057 BLKGRD: Unknown error in cmd "
3091 				"0x%x reftag 0x%x blk cnt 0x%x "
3092 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3093 				scsi_prot_ref_tag(cmd),
3094 				scsi_logical_block_count(cmd), bgstat, bghm);
3095 
3096 		/* Calculate what type of error it was */
3097 		lpfc_calc_bg_err(phba, lpfc_cmd);
3098 	}
3099 out:
3100 	return ret;
3101 }
3102 
3103 /**
3104  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3105  * @phba: The Hba for which this call is being executed.
3106  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3107  *
3108  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3109  * field of @lpfc_cmd for device with SLI-4 interface spec.
3110  *
3111  * Return codes:
3112  *	2 - Error - Do not retry
3113  *	1 - Error - Retry
3114  *	0 - Success
3115  **/
3116 static int
3117 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3118 {
3119 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3120 	struct scatterlist *sgel = NULL;
3121 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3122 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3123 	struct sli4_sge *first_data_sgl;
3124 	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3125 	struct lpfc_vport *vport = phba->pport;
3126 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
3127 	dma_addr_t physaddr;
3128 	uint32_t dma_len;
3129 	uint32_t dma_offset = 0;
3130 	int nseg, i, j;
3131 	struct ulp_bde64 *bde;
3132 	bool lsp_just_set = false;
3133 	struct sli4_hybrid_sgl *sgl_xtra = NULL;
3134 
3135 	/*
3136 	 * There are three possibilities here - use scatter-gather segment, use
3137 	 * the single mapping, or neither.  Start the lpfc command prep by
3138 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3139 	 * data bde entry.
3140 	 */
3141 	if (scsi_sg_count(scsi_cmnd)) {
3142 		/*
3143 		 * The driver stores the segment count returned from dma_map_sg
3144 		 * because this is a count of dma-mappings used to map the use_sg
3145 		 * pages.  They are not guaranteed to be the same for those
3146 		 * architectures that implement an IOMMU.
3147 		 */
3148 
3149 		nseg = scsi_dma_map(scsi_cmnd);
3150 		if (unlikely(nseg <= 0))
3151 			return 1;
3152 		sgl += 1;
3153 		/* clear the last flag in the fcp_rsp map entry */
3154 		sgl->word2 = le32_to_cpu(sgl->word2);
3155 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3156 		sgl->word2 = cpu_to_le32(sgl->word2);
3157 		sgl += 1;
3158 		first_data_sgl = sgl;
3159 		lpfc_cmd->seg_cnt = nseg;
3160 		if (!phba->cfg_xpsgl &&
3161 		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3162 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3163 					"9074 BLKGRD:"
3164 					" %s: Too many sg segments from "
3165 					"dma_map_sg.  Config %d, seg_cnt %d\n",
3166 					__func__, phba->cfg_sg_seg_cnt,
3167 					lpfc_cmd->seg_cnt);
3168 			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3169 			lpfc_cmd->seg_cnt = 0;
3170 			scsi_dma_unmap(scsi_cmnd);
3171 			return 2;
3172 		}
3173 
3174 		/*
3175 		 * The driver established a maximum scatter-gather segment count
3176 		 * during probe that limits the number of sg elements in any
3177 		 * single scsi command.  Just run through the seg_cnt and format
3178 		 * the sge's.
3179 		 * When using SLI-3 the driver will try to fit all the BDEs into
3180 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
3181 		 * does for SLI-2 mode.
3182 		 */
3183 
3184 		/* for tracking segment boundaries */
3185 		sgel = scsi_sglist(scsi_cmnd);
3186 		j = 2;
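		/* j counts SGE slots used in the current SGL page; the
		 * FCP_CMND and FCP_RSP entries already occupy the first two.
		 */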
3187 		for (i = 0; i < nseg; i++) {
3188 			sgl->word2 = 0;
3189 			if (nseg == 1) {
3190 				bf_set(lpfc_sli4_sge_last, sgl, 1);
3191 				bf_set(lpfc_sli4_sge_type, sgl,
3192 				       LPFC_SGE_TYPE_DATA);
3193 			} else {
3194 				bf_set(lpfc_sli4_sge_last, sgl, 0);
3195 
3196 				/* do we need to expand the segment */
3197 				if (!lsp_just_set &&
3198 				    !((j + 1) % phba->border_sge_num) &&
3199 				    ((nseg - 1) != i)) {
3200 					/* set LSP type */
3201 					bf_set(lpfc_sli4_sge_type, sgl,
3202 					       LPFC_SGE_TYPE_LSP);
3203 
3204 					sgl_xtra = lpfc_get_sgl_per_hdwq(
3205 							phba, lpfc_cmd);
3206 
3207 					if (unlikely(!sgl_xtra)) {
3208 						lpfc_cmd->seg_cnt = 0;
3209 						scsi_dma_unmap(scsi_cmnd);
3210 						return 1;
3211 					}
3212 					sgl->addr_lo = cpu_to_le32(putPaddrLow(
3213 						       sgl_xtra->dma_phys_sgl));
3214 					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3215 						       sgl_xtra->dma_phys_sgl));
3216 
3217 				} else {
3218 					bf_set(lpfc_sli4_sge_type, sgl,
3219 					       LPFC_SGE_TYPE_DATA);
3220 				}
3221 			}
3222 
3223 			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3224 				     LPFC_SGE_TYPE_LSP)) {
3225 				if ((nseg - 1) == i)
3226 					bf_set(lpfc_sli4_sge_last, sgl, 1);
3227 
3228 				physaddr = sg_dma_address(sgel);
3229 				dma_len = sg_dma_len(sgel);
3230 				sgl->addr_lo = cpu_to_le32(putPaddrLow(
3231 							   physaddr));
3232 				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3233 							   physaddr));
3234 
3235 				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3236 				sgl->word2 = cpu_to_le32(sgl->word2);
3237 				sgl->sge_len = cpu_to_le32(dma_len);
3238 
3239 				dma_offset += dma_len;
3240 				sgel = sg_next(sgel);
3241 
3242 				sgl++;
3243 				lsp_just_set = false;
3244 
3245 			} else {
3246 				sgl->word2 = cpu_to_le32(sgl->word2);
3247 				sgl->sge_len = cpu_to_le32(
3248 						     phba->cfg_sg_dma_buf_size);
3249 
3250 				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3251 				i = i - 1;
3252 
3253 				lsp_just_set = true;
3254 			}
3255 
3256 			j++;
3257 		}
3258 
3259 		/* PBDE support for first data SGE only.
3260 		 * For FCoE, we key off Performance Hints.
3261 		 * For FC, we key off lpfc_enable_pbde.
3262 		 */
3263 		if (nseg == 1 &&
3264 		    ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3265 		     phba->cfg_enable_pbde)) {
3266 			/* Words 13-15 */
3267 			bde = (struct ulp_bde64 *)
3268 				&wqe->words[13];
3269 			bde->addrLow = first_data_sgl->addr_lo;
3270 			bde->addrHigh = first_data_sgl->addr_hi;
3271 			bde->tus.f.bdeSize =
3272 					le32_to_cpu(first_data_sgl->sge_len);
3273 			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3274 			bde->tus.w = cpu_to_le32(bde->tus.w);
3275 
3276 			/* Word 11 - set PBDE bit */
3277 			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3278 		} else {
3279 			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3280 			/* Word 11 - PBDE bit disabled by default template */
3281 		}
3282 	} else {
3283 		sgl += 1;
3284 		/* set the last flag in the fcp_rsp map entry */
3285 		sgl->word2 = le32_to_cpu(sgl->word2);
3286 		bf_set(lpfc_sli4_sge_last, sgl, 1);
3287 		sgl->word2 = cpu_to_le32(sgl->word2);
3288 
3289 		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3290 		    phba->cfg_enable_pbde) {
3291 			bde = (struct ulp_bde64 *)
3292 				&wqe->words[13];
3293 			memset(bde, 0, (sizeof(uint32_t) * 3));
3294 		}
3295 	}
3296 
3297 	/*
3298 	 * Finish initializing those IOCB fields that are dependent on the
3299 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3300 	 * explicitly reinitialized since all iocb memory resources
3301 	 * are reused.
3302 	 */
3303 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3304 	/* Set first-burst provided it was successfully negotiated */
3305 	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3306 	    vport->cfg_first_burst_size &&
3307 	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3308 		u32 init_len, total_len;
3309 
3310 		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3311 		init_len = min(total_len, vport->cfg_first_burst_size);
3312 
3313 		/* Word 4 & 5 */
3314 		wqe->fcp_iwrite.initial_xfer_len = init_len;
3315 		wqe->fcp_iwrite.total_xfer_len = total_len;
3316 	} else {
3317 		/* Word 4 */
3318 		wqe->fcp_iwrite.total_xfer_len =
3319 			be32_to_cpu(fcp_cmnd->fcpDl);
3320 	}
3321 
3322 	/*
3323 	 * If the OAS driver feature is enabled and the lun is enabled for
3324 	 * OAS, set the oas iocb related flags.
3325 	 */
3326 	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3327 		scsi_cmnd->device->hostdata)->oas_enabled) {
3328 		lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3329 		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3330 			scsi_cmnd->device->hostdata)->priority;
3331 
3332 		/* Word 10 */
3333 		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3334 		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3335 
3336 		if (lpfc_cmd->cur_iocbq.priority)
3337 			bf_set(wqe_ccp, &wqe->generic.wqe_com,
3338 			       (lpfc_cmd->cur_iocbq.priority << 1));
3339 		else
3340 			bf_set(wqe_ccp, &wqe->generic.wqe_com,
3341 			       (phba->cfg_XLanePriority << 1));
3342 	}
3343 
3344 	return 0;
3345 }
3346 
3347 /**
3348  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3349  * @phba: The Hba for which this call is being executed.
3350  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3351  *
3352  * This is the protection/DIF aware version of
3353  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3354  * two functions eventually, but for now it is kept separate.
3355  * Return codes:
3356  *	2 - Error - Do not retry
3357  *	1 - Error - Retry
3358  *	0 - Success
3359  **/
3360 static int
3361 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3362 		struct lpfc_io_buf *lpfc_cmd)
3363 {
3364 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3365 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3366 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3367 	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3368 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
3369 	uint32_t num_sge = 0;
3370 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3371 	int prot_group_type = 0;
3372 	int fcpdl;
3373 	int ret = 1;
3374 	struct lpfc_vport *vport = phba->pport;
3375 
3376 	/*
3377 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3378 	 * and fcp_rsp regions to the first data sge entry
3379 	 */
3380 	if (scsi_sg_count(scsi_cmnd)) {
3381 		/*
3382 		 * The driver stores the segment count returned from dma_map_sg
3383 		 * because this is a count of dma-mappings used to map the use_sg
3384 		 * pages.  They are not guaranteed to be the same for those
3385 		 * architectures that implement an IOMMU.
3386 		 */
3387 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3388 					scsi_sglist(scsi_cmnd),
3389 					scsi_sg_count(scsi_cmnd), datadir);
3390 		if (unlikely(!datasegcnt))
3391 			return 1;
3392 
3393 		sgl += 1;
3394 		/* clear the last flag in the fcp_rsp map entry */
3395 		sgl->word2 = le32_to_cpu(sgl->word2);
3396 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3397 		sgl->word2 = cpu_to_le32(sgl->word2);
3398 
3399 		sgl += 1;
3400 		lpfc_cmd->seg_cnt = datasegcnt;
3401 
3402 		/* First check if data segment count from SCSI Layer is good */
3403 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3404 		    !phba->cfg_xpsgl) {
3405 			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3406 			ret = 2;
3407 			goto err;
3408 		}
3409 
3410 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3411 
3412 		switch (prot_group_type) {
3413 		case LPFC_PG_TYPE_NO_DIF:
3414 			/* Here we need to add a DISEED to the count */
3415 			if (((lpfc_cmd->seg_cnt + 1) >
3416 					phba->cfg_total_seg_cnt) &&
3417 			    !phba->cfg_xpsgl) {
3418 				ret = 2;
3419 				goto err;
3420 			}
3421 
3422 			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3423 					datasegcnt, lpfc_cmd);
3424 
3425 			/* we should have 2 or more entries in buffer list */
3426 			if (num_sge < 2) {
3427 				ret = 2;
3428 				goto err;
3429 			}
3430 			break;
3431 
3432 		case LPFC_PG_TYPE_DIF_BUF:
3433 			/*
3434 			 * This type indicates that protection buffers are
3435 			 * passed to the driver, so they must also be
3436 			 * prepared for DMA
3437 			 */
3438 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3439 					scsi_prot_sglist(scsi_cmnd),
3440 					scsi_prot_sg_count(scsi_cmnd), datadir);
3441 			if (unlikely(!protsegcnt)) {
3442 				scsi_dma_unmap(scsi_cmnd);
3443 				return 1;
3444 			}
3445 
3446 			lpfc_cmd->prot_seg_cnt = protsegcnt;
3447 			/*
3448 			 * There is a minimum of 3 SGEs used for every
3449 			 * protection data segment.
3450 			 */
3451 			if (((lpfc_cmd->prot_seg_cnt * 3) >
3452 					(phba->cfg_total_seg_cnt - 2)) &&
3453 			    !phba->cfg_xpsgl) {
3454 				ret = 2;
3455 				goto err;
3456 			}
3457 
3458 			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3459 					datasegcnt, protsegcnt, lpfc_cmd);
3460 
3461 			/* we should have 3 or more entries in buffer list */
3462 			if (num_sge < 3 ||
3463 			    (num_sge > phba->cfg_total_seg_cnt &&
3464 			     !phba->cfg_xpsgl)) {
3465 				ret = 2;
3466 				goto err;
3467 			}
3468 			break;
3469 
3470 		case LPFC_PG_TYPE_INVALID:
3471 		default:
3472 			scsi_dma_unmap(scsi_cmnd);
3473 			lpfc_cmd->seg_cnt = 0;
3474 
3475 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3476 					"9083 Unexpected protection group %i\n",
3477 					prot_group_type);
3478 			return 2;
3479 		}
3480 	}
3481 
3482 	switch (scsi_get_prot_op(scsi_cmnd)) {
3483 	case SCSI_PROT_WRITE_STRIP:
3484 	case SCSI_PROT_READ_STRIP:
3485 		lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
3486 		break;
3487 	case SCSI_PROT_WRITE_INSERT:
3488 	case SCSI_PROT_READ_INSERT:
3489 		lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
3490 		break;
3491 	case SCSI_PROT_WRITE_PASS:
3492 	case SCSI_PROT_READ_PASS:
3493 		lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
3494 		break;
3495 	}
3496 
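	/* Adjust fcpDl to account for protection data carried for this DIF op */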
3497 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3498 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3499 
3500 	/* Set first-burst provided it was successfully negotiated */
3501 	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3502 	    vport->cfg_first_burst_size &&
3503 	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3504 		u32 init_len, total_len;
3505 
3506 		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3507 		init_len = min(total_len, vport->cfg_first_burst_size);
3508 
3509 		/* Word 4 & 5 */
3510 		wqe->fcp_iwrite.initial_xfer_len = init_len;
3511 		wqe->fcp_iwrite.total_xfer_len = total_len;
3512 	} else {
3513 		/* Word 4 */
3514 		wqe->fcp_iwrite.total_xfer_len =
3515 			be32_to_cpu(fcp_cmnd->fcpDl);
3516 	}
3517 
3518 	/*
3519 	 * If the OAS driver feature is enabled and the lun is enabled for
3520 	 * OAS, set the oas iocb related flags.
3521 	 */
3522 	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3523 		scsi_cmnd->device->hostdata)->oas_enabled) {
3524 		lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3525 
3526 		/* Word 10 */
3527 		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3528 		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3529 		bf_set(wqe_ccp, &wqe->generic.wqe_com,
3530 		       (phba->cfg_XLanePriority << 1));
3531 	}
3532 
3533 	/* Word 7. DIF Flags */
3534 	if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
3535 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3536 	else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
3537 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3538 	else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
3539 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3540 
3541 	lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
3542 				 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3543 
3544 	return 0;
3545 err:
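	/* Unwind any data and protection DMA mappings made above */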
3546 	if (lpfc_cmd->seg_cnt)
3547 		scsi_dma_unmap(scsi_cmnd);
3548 	if (lpfc_cmd->prot_seg_cnt)
3549 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3550 			     scsi_prot_sg_count(scsi_cmnd),
3551 			     scsi_cmnd->sc_data_direction);
3552 
3553 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3554 			"9084 Cannot setup S/G List for HBA "
3555 			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3556 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3557 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3558 			prot_group_type, num_sge);
3559 
3560 	lpfc_cmd->seg_cnt = 0;
3561 	lpfc_cmd->prot_seg_cnt = 0;
3562 	return ret;
3563 }
3564 
3565 /**
3566  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3567  * @phba: The Hba for which this call is being executed.
3568  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3569  *
3570  * This routine wraps the actual DMA mapping function pointer from the
3571  * lpfc_hba struct.
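 * The pointer is expected to be assigned at attach time (see
 * lpfc_scsi_api_table_setup()) to either the SLI-3 or SLI-4 variant.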
3572  *
3573  * Return codes:
3574  *	1 - Error
3575  *	0 - Success
3576  **/
3577 static inline int
3578 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3579 {
3580 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3581 }
3582 
3583 /**
3584  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for BlockGuard DMA mapping of scsi buffer
3586  * @phba: The Hba for which this call is being executed.
3587  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3588  *
3589  * This routine wraps the actual DMA mapping function pointer from the
3590  * lpfc_hba struct.
3591  *
3592  * Return codes:
3593  *	1 - Error
3594  *	0 - Success
3595  **/
3596 static inline int
3597 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3598 {
3599 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3600 }
3601 
3602 /**
3603  * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi buffer
3605  * @vport: Pointer to vport object.
3606  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3607  * @tmo: Timeout value for IO
3608  *
3609  * This routine initializes IOCB/WQE data structure from scsi command
3610  *
3611  * Return codes:
3612  *	1 - Error
3613  *	0 - Success
3614  **/
3615 static inline int
3616 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3617 			uint8_t tmo)
3618 {
3619 	return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3620 }
3621 
3622 /**
3623  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3624  * @phba: Pointer to hba context object.
3625  * @vport: Pointer to vport object.
3626  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3627  * @fcpi_parm: FCP Initiator parameter.
3628  *
3629  * This function posts an event when there is a SCSI command reporting
3630  * error from the scsi device.
3631  **/
3632 static void
3633 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3634 		struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3635 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3636 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3637 	uint32_t resp_info = fcprsp->rspStatus2;
3638 	uint32_t scsi_status = fcprsp->rspStatus3;
3639 	struct lpfc_fast_path_event *fast_path_evt = NULL;
3640 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3641 	unsigned long flags;
3642 
3643 	if (!pnode)
3644 		return;
3645 
3646 	/* If there is a queue full or busy condition, send a scsi event */
3647 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3648 		(cmnd->result == SAM_STAT_BUSY)) {
3649 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3650 		if (!fast_path_evt)
3651 			return;
3652 		fast_path_evt->un.scsi_evt.event_type =
3653 			FC_REG_SCSI_EVENT;
3654 		fast_path_evt->un.scsi_evt.subcategory =
3655 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3656 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3657 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3658 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3659 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3660 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3661 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3662 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3663 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3664 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3665 		if (!fast_path_evt)
3666 			return;
3667 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3668 			FC_REG_SCSI_EVENT;
3669 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3670 			LPFC_EVENT_CHECK_COND;
3671 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
3672 			cmnd->device->lun;
3673 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3674 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3675 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3676 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3677 		fast_path_evt->un.check_cond_evt.sense_key =
3678 			cmnd->sense_buffer[2] & 0xf;
3679 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3680 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3681 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3682 		     fcpi_parm &&
3683 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3684 			((scsi_status == SAM_STAT_GOOD) &&
3685 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
3686 		/*
3687 		 * fcpi_parm is valid and either the residual does not match it,
3688 		 * or the status is good with no residual reported: read check error
3689 		 */
3690 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3691 		if (!fast_path_evt)
3692 			return;
3693 		fast_path_evt->un.read_check_error.header.event_type =
3694 			FC_REG_FABRIC_EVENT;
3695 		fast_path_evt->un.read_check_error.header.subcategory =
3696 			LPFC_EVENT_FCPRDCHKERR;
3697 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3698 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3699 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3700 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3701 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3702 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3703 		fast_path_evt->un.read_check_error.fcpiparam =
3704 			fcpi_parm;
3705 	} else
3706 		return;
3707 
3708 	fast_path_evt->vport = vport;
3709 	spin_lock_irqsave(&phba->hbalock, flags);
3710 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3711 	spin_unlock_irqrestore(&phba->hbalock, flags);
3712 	lpfc_worker_wake_up(phba);
3713 	return;
3714 }
3715 
3716 /**
3717  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3718  * @phba: The HBA for which this call is being executed.
3719  * @psb: The scsi buffer which is going to be un-mapped.
3720  *
3721  * This routine un-maps the DMA mappings of the data and protection
3722  * scatter-gather lists of the scsi command held in @psb.
3723  **/
3724 static void
3725 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3726 {
3727 	/*
3728 	 * There are only two special cases to consider.  (1) the scsi command
3729 	 * requested scatter-gather usage or (2) the scsi command allocated
3730 	 * a request buffer, but did not request use_sg.  There is a third
3731 	 * case, but it does not require resource deallocation.
3732 	 */
3733 	if (psb->seg_cnt > 0)
3734 		scsi_dma_unmap(psb->pCmd);
3735 	if (psb->prot_seg_cnt > 0)
3736 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3737 				scsi_prot_sg_count(psb->pCmd),
3738 				psb->pCmd->sc_data_direction);
3739 }
3740 
3741 /**
3742  * lpfc_unblock_requests - allow further commands to be queued.
3743  * @phba: pointer to phba object
3744  *
3745  * For single vport, just call scsi_unblock_requests on physical port.
3746  * For multiple vports, send scsi_unblock_requests for all the vports.
3747  */
3748 void
3749 lpfc_unblock_requests(struct lpfc_hba *phba)
3750 {
3751 	struct lpfc_vport **vports;
3752 	struct Scsi_Host  *shost;
3753 	int i;
3754 
3755 	if (phba->sli_rev == LPFC_SLI_REV4 &&
3756 	    !phba->sli4_hba.max_cfg_param.vpi_used) {
3757 		shost = lpfc_shost_from_vport(phba->pport);
3758 		scsi_unblock_requests(shost);
3759 		return;
3760 	}
3761 
3762 	vports = lpfc_create_vport_work_array(phba);
3763 	if (vports != NULL)
3764 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3765 			shost = lpfc_shost_from_vport(vports[i]);
3766 			scsi_unblock_requests(shost);
3767 		}
3768 	lpfc_destroy_vport_work_array(phba, vports);
3769 }
3770 
3771 /**
3772  * lpfc_block_requests - prevent further commands from being queued.
3773  * @phba: pointer to phba object
3774  *
3775  * For single vport, just call scsi_block_requests on physical port.
3776  * For multiple vports, send scsi_block_requests for all the vports.
3777  */
3778 void
3779 lpfc_block_requests(struct lpfc_hba *phba)
3780 {
3781 	struct lpfc_vport **vports;
3782 	struct Scsi_Host  *shost;
3783 	int i;
3784 
3785 	if (atomic_read(&phba->cmf_stop_io))
3786 		return;
3787 
3788 	if (phba->sli_rev == LPFC_SLI_REV4 &&
3789 	    !phba->sli4_hba.max_cfg_param.vpi_used) {
3790 		shost = lpfc_shost_from_vport(phba->pport);
3791 		scsi_block_requests(shost);
3792 		return;
3793 	}
3794 
3795 	vports = lpfc_create_vport_work_array(phba);
3796 	if (vports != NULL)
3797 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3798 			shost = lpfc_shost_from_vport(vports[i]);
3799 			scsi_block_requests(shost);
3800 		}
3801 	lpfc_destroy_vport_work_array(phba, vports);
3802 }
3803 
3804 /**
3805  * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
3806  * @phba: The HBA for which this call is being executed.
3807  * @time: The latency of the IO that completed (in ns)
3808  * @size: The size of the IO that completed
3809  * @shost: SCSI host the IO completed on (NULL for a NVME IO)
3810  *
3811  * The routine adjusts the various Burst and Bandwidth counters used in
3812  * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
3813  * that means the IO was never issued to the HBA, so this routine is
3814  * just being called to clean up the counter from a previous
3815  * lpfc_update_cmf_cmd call.
3816  */
3817 int
3818 lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3819 		     uint64_t time, uint32_t size, struct Scsi_Host *shost)
3820 {
3821 	struct lpfc_cgn_stat *cgs;
3822 
3823 	if (time != LPFC_CGN_NOT_SENT) {
3824 		/* lat is ns coming in, save latency in us */
3825 		if (time < 1000)
3826 			time = 1;
3827 		else
3828 			time = div_u64(time + 500, 1000); /* round it */
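		/* e.g. 1234499 ns rounds to 1234 us; 1234500 ns rounds to 1235 us */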
3829 
3830 		cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3831 		atomic64_add(size, &cgs->rcv_bytes);
3832 		atomic64_add(time, &cgs->rx_latency);
3833 		atomic_inc(&cgs->rx_io_cnt);
3834 	}
3835 	return 0;
3836 }
3837 
3838 /**
3839  * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
3840  * @phba: The HBA for which this call is being executed.
3841  * @size: The size of the IO that will be issued
3842  *
3843  * The routine adjusts the various Burst and Bandwidth counters used in
3844  * Congestion management and E2E.
3845  */
3846 int
3847 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3848 {
3849 	uint64_t total;
3850 	struct lpfc_cgn_stat *cgs;
3851 	int cpu;
3852 
3853 	/* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
3854 	if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3855 	    phba->cmf_max_bytes_per_interval) {
3856 		total = 0;
3857 		for_each_present_cpu(cpu) {
3858 			cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3859 			total += atomic64_read(&cgs->total_bytes);
3860 		}
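		/*
		 * total is the byte count issued in the current CMF interval
		 * across all CPUs; once the managed bandwidth budget is
		 * reached, block further SCSI I/O and return -EBUSY.
		 */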
3861 		if (total >= phba->cmf_max_bytes_per_interval) {
3862 			if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3863 				lpfc_block_requests(phba);
3864 				phba->cmf_last_ts =
3865 					lpfc_calc_cmf_latency(phba);
3866 			}
3867 			atomic_inc(&phba->cmf_busy);
3868 			return -EBUSY;
3869 		}
3870 		if (size > atomic_read(&phba->rx_max_read_cnt))
3871 			atomic_set(&phba->rx_max_read_cnt, size);
3872 	}
3873 
3874 	cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3875 	atomic64_add(size, &cgs->total_bytes);
3876 	return 0;
3877 }
3878 
3879 /**
3880  * lpfc_handle_fcp_err - FCP response handler
3881  * @vport: The virtual port for which this call is being executed.
3882  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3883  * @fcpi_parm: FCP Initiator parameter.
3884  *
3885  * This routine is called to process response IOCB with status field
3886  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3887  * based upon SCSI and FCP error.
3888  **/
3889 static void
3890 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3891 		    uint32_t fcpi_parm)
3892 {
3893 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3894 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3895 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3896 	uint32_t resp_info = fcprsp->rspStatus2;
3897 	uint32_t scsi_status = fcprsp->rspStatus3;
3898 	uint32_t *lp;
3899 	uint32_t host_status = DID_OK;
3900 	uint32_t rsplen = 0;
3901 	uint32_t fcpDl;
3902 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3903 
3904 
3905 	/*
3906 	 *  If this is a task management command, there is no
3907 	 *  scsi packet associated with this lpfc_cmd.  The driver
3908 	 *  consumes it.
3909 	 */
3910 	if (fcpcmd->fcpCntl2) {
3911 		scsi_status = 0;
3912 		goto out;
3913 	}
3914 
3915 	if (resp_info & RSP_LEN_VALID) {
3916 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3917 		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3918 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3919 					 "2719 Invalid response length: "
3920 					 "tgt x%x lun x%llx cmnd x%x rsplen "
3921 					 "x%x\n", cmnd->device->id,
3922 					 cmnd->device->lun, cmnd->cmnd[0],
3923 					 rsplen);
3924 			host_status = DID_ERROR;
3925 			goto out;
3926 		}
3927 		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3928 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3929 				 "2757 Protocol failure detected during "
3930 				 "processing of FCP I/O op: "
3931 				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3932 				 cmnd->device->id,
3933 				 cmnd->device->lun, cmnd->cmnd[0],
3934 				 fcprsp->rspInfo3);
3935 			host_status = DID_ERROR;
3936 			goto out;
3937 		}
3938 	}
3939 
3940 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3941 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3942 		if (snslen > SCSI_SENSE_BUFFERSIZE)
3943 			snslen = SCSI_SENSE_BUFFERSIZE;
3944 
3945 		if (resp_info & RSP_LEN_VALID)
3946 			rsplen = be32_to_cpu(fcprsp->rspRspLen);
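		/* Sense data follows any response-info bytes in the FCP_RSP payload */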
3947 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3948 	}
3949 	lp = (uint32_t *)cmnd->sense_buffer;
3950 
3951 	/* special handling for under run conditions */
3952 	if (!scsi_status && (resp_info & RESID_UNDER)) {
3953 		/* don't log under runs if fcp set... */
3954 		if (vport->cfg_log_verbose & LOG_FCP)
3955 			logit = LOG_FCP_ERROR;
3956 		/* unless operator says so */
3957 		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3958 			logit = LOG_FCP_UNDER;
3959 	}
3960 
3961 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3962 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3963 			 "Data: x%x x%x x%x x%x x%x\n",
3964 			 cmnd->cmnd[0], scsi_status,
3965 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3966 			 be32_to_cpu(fcprsp->rspResId),
3967 			 be32_to_cpu(fcprsp->rspSnsLen),
3968 			 be32_to_cpu(fcprsp->rspRspLen),
3969 			 fcprsp->rspInfo3);
3970 
3971 	scsi_set_resid(cmnd, 0);
3972 	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3973 	if (resp_info & RESID_UNDER) {
3974 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3975 
3976 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3977 				 "9025 FCP Underrun, expected %d, "
3978 				 "residual %d Data: x%x x%x x%x\n",
3979 				 fcpDl,
3980 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3981 				 cmnd->underflow);
3982 
3983 		/*
3984 		 * If there is an under run, check if under run reported by
3985 		 * storage array is same as the under run reported by HBA.
3986 		 * If this is not same, there is a dropped frame.
3987 		 */
3988 		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3989 			lpfc_printf_vlog(vport, KERN_WARNING,
3990 					 LOG_FCP | LOG_FCP_ERROR,
3991 					 "9026 FCP Read Check Error "
3992 					 "and Underrun Data: x%x x%x x%x x%x\n",
3993 					 fcpDl,
3994 					 scsi_get_resid(cmnd), fcpi_parm,
3995 					 cmnd->cmnd[0]);
3996 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3997 			host_status = DID_ERROR;
3998 		}
3999 		/*
4000 		 * The cmnd->underflow is the minimum number of bytes that must
4001 		 * be transferred for this command.  Provided a sense condition
4002 		 * is not present, make sure the actual amount transferred is at
4003 		 * least the underflow value or fail.
4004 		 */
4005 		if (!(resp_info & SNS_LEN_VALID) &&
4006 		    (scsi_status == SAM_STAT_GOOD) &&
4007 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
4008 		     < cmnd->underflow)) {
4009 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4010 					 "9027 FCP command x%x residual "
4011 					 "underrun converted to error "
4012 					 "Data: x%x x%x x%x\n",
4013 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
4014 					 scsi_get_resid(cmnd), cmnd->underflow);
4015 			host_status = DID_ERROR;
4016 		}
4017 	} else if (resp_info & RESID_OVER) {
4018 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4019 				 "9028 FCP command x%x residual overrun error. "
4020 				 "Data: x%x x%x\n", cmnd->cmnd[0],
4021 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
4022 		host_status = DID_ERROR;
4023 
4024 	/*
4025 	 * Check the SLI count to validate that the entire transfer was
4026 	 * actually done (fcpi_parm should be zero). Apply the check only to reads.
4027 	 */
4028 	} else if (fcpi_parm) {
4029 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
4030 				 "9029 FCP %s Check Error Data: "
4031 				 "x%x x%x x%x x%x x%x\n",
4032 				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
4033 				 "Read" : "Write"),
4034 				 fcpDl, be32_to_cpu(fcprsp->rspResId),
4035 				 fcpi_parm, cmnd->cmnd[0], scsi_status);
4036 
4037 		/* There is some issue with the LPe12000 that causes it
4038 		 * to miscalculate the fcpi_parm and falsely trip this
4039 		 * recovery logic.  Detect this case and don't error when true.
4040 		 */
4041 		if (fcpi_parm > fcpDl)
4042 			goto out;
4043 
4044 		switch (scsi_status) {
4045 		case SAM_STAT_GOOD:
4046 		case SAM_STAT_CHECK_CONDITION:
4047 			/* Fabric dropped a data frame. Fail any successful
4048 			 * command in which we detected dropped frames.
4049 			 * A status of good or some check conditions could
4050 			 * be considered a successful command.
4051 			 */
4052 			host_status = DID_ERROR;
4053 			break;
4054 		}
4055 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4056 	}
4057 
4058  out:
4059 	cmnd->result = host_status << 16 | scsi_status;
4060 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4061 }
4062 
4063 /**
4064  * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4065  * @phba: The hba for which this call is being executed.
4066  * @pwqeIn: The command WQE for the scsi cmnd.
4067  * @pwqeOut: Pointer to driver response WQE object.
4068  *
4069  * This routine sets the scsi command result based on the status field of
4070  * the response WQE. It also handles the QUEUE FULL condition by ramping
4071  * down the device queue depth.
4072  **/
4073 static void
4074 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4075 			 struct lpfc_iocbq *pwqeOut)
4076 {
4077 	struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
4078 	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
4079 	struct lpfc_vport *vport = pwqeIn->vport;
4080 	struct lpfc_rport_data *rdata;
4081 	struct lpfc_nodelist *ndlp;
4082 	struct scsi_cmnd *cmd;
4083 	unsigned long flags;
4084 	struct lpfc_fast_path_event *fast_path_evt;
4085 	struct Scsi_Host *shost;
4086 	u32 logit = LOG_FCP;
4087 	u32 status, idx;
4088 	u32 lat;
4089 	u8 wait_xb_clr = 0;
4090 
4091 	/* Sanity check on return of outstanding command */
4092 	if (!lpfc_cmd) {
4093 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4094 				 "9032 Null lpfc_cmd pointer. No "
4095 				 "release, skip completion\n");
4096 		return;
4097 	}
4098 
4099 	rdata = lpfc_cmd->rdata;
4100 	ndlp = rdata->pnode;
4101 
4102 	/* Sanity check on return of outstanding command */
4103 	cmd = lpfc_cmd->pCmd;
4104 	if (!cmd) {
4105 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4106 				 "9042 I/O completion: Not an active IO\n");
4107 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4108 		return;
4109 	}
4110 	/* Guard against abort handler being called at same time */
4111 	spin_lock(&lpfc_cmd->buf_lock);
4112 	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4113 	if (phba->sli4_hba.hdwq)
4114 		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4115 
4116 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4117 	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4118 		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4119 #endif
4120 	shost = cmd->device->host;
4121 
4122 	status = bf_get(lpfc_wcqe_c_status, wcqe);
4123 	lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4124 	lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4125 
4126 	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4127 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4128 		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4129 		if (phba->cfg_fcp_wait_abts_rsp)
4130 			wait_xb_clr = 1;
4131 	}
4132 
4133 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4134 	if (lpfc_cmd->prot_data_type) {
4135 		struct scsi_dif_tuple *src = NULL;
4136 
4137 		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4138 		/*
4139 		 * Used to restore any changes to protection
4140 		 * data for error injection.
4141 		 */
4142 		switch (lpfc_cmd->prot_data_type) {
4143 		case LPFC_INJERR_REFTAG:
4144 			src->ref_tag =
4145 				lpfc_cmd->prot_data;
4146 			break;
4147 		case LPFC_INJERR_APPTAG:
4148 			src->app_tag =
4149 				(uint16_t)lpfc_cmd->prot_data;
4150 			break;
4151 		case LPFC_INJERR_GUARD:
4152 			src->guard_tag =
4153 				(uint16_t)lpfc_cmd->prot_data;
4154 			break;
4155 		default:
4156 			break;
4157 		}
4158 
4159 		lpfc_cmd->prot_data = 0;
4160 		lpfc_cmd->prot_data_type = 0;
4161 		lpfc_cmd->prot_data_segment = NULL;
4162 	}
4163 #endif
4164 	if (unlikely(lpfc_cmd->status)) {
4165 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4166 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
4167 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4168 		else if (lpfc_cmd->status >= IOSTAT_CNT)
4169 			lpfc_cmd->status = IOSTAT_DEFAULT;
4170 		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4171 		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4172 		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4173 		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4174 			logit = 0;
4175 		else
4176 			logit = LOG_FCP | LOG_FCP_UNDER;
4177 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4178 				 "9034 FCP cmd x%x failed <%d/%lld> "
4179 				 "status: x%x result: x%x "
4180 				 "sid: x%x did: x%x oxid: x%x "
4181 				 "Data: x%x x%x x%x\n",
4182 				 cmd->cmnd[0],
4183 				 cmd->device ? cmd->device->id : 0xffff,
4184 				 cmd->device ? cmd->device->lun : 0xffff,
4185 				 lpfc_cmd->status, lpfc_cmd->result,
4186 				 vport->fc_myDID,
4187 				 (ndlp) ? ndlp->nlp_DID : 0,
4188 				 lpfc_cmd->cur_iocbq.sli4_xritag,
4189 				 wcqe->parameter, wcqe->total_data_placed,
4190 				 lpfc_cmd->cur_iocbq.iotag);
4191 	}
4192 
4193 	switch (lpfc_cmd->status) {
4194 	case IOSTAT_SUCCESS:
4195 		cmd->result = DID_OK << 16;
4196 		break;
4197 	case IOSTAT_FCP_RSP_ERROR:
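		/*
		 * Derive the SLI-4 equivalent of fcpi_parm: the bytes the
		 * adapter did not place (WQE transfer length minus the
		 * completed byte count).
		 */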
4198 		lpfc_handle_fcp_err(vport, lpfc_cmd,
4199 				    pwqeIn->wqe.fcp_iread.total_xfer_len -
4200 				    wcqe->total_data_placed);
4201 		break;
4202 	case IOSTAT_NPORT_BSY:
4203 	case IOSTAT_FABRIC_BSY:
4204 		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4205 		fast_path_evt = lpfc_alloc_fast_evt(phba);
4206 		if (!fast_path_evt)
4207 			break;
4208 		fast_path_evt->un.fabric_evt.event_type =
4209 			FC_REG_FABRIC_EVENT;
4210 		fast_path_evt->un.fabric_evt.subcategory =
4211 			(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4212 			LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4213 		if (ndlp) {
4214 			memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4215 			       &ndlp->nlp_portname,
4216 				sizeof(struct lpfc_name));
4217 			memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4218 			       &ndlp->nlp_nodename,
4219 				sizeof(struct lpfc_name));
4220 		}
4221 		fast_path_evt->vport = vport;
4222 		fast_path_evt->work_evt.evt =
4223 			LPFC_EVT_FASTPATH_MGMT_EVT;
4224 		spin_lock_irqsave(&phba->hbalock, flags);
4225 		list_add_tail(&fast_path_evt->work_evt.evt_listp,
4226 			      &phba->work_list);
4227 		spin_unlock_irqrestore(&phba->hbalock, flags);
4228 		lpfc_worker_wake_up(phba);
4229 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4230 				 "9035 Fabric/Node busy FCP cmd x%x failed"
4231 				 " <%d/%lld> "
4232 				 "status: x%x result: x%x "
4233 				 "sid: x%x did: x%x oxid: x%x "
4234 				 "Data: x%x x%x x%x\n",
4235 				 cmd->cmnd[0],
4236 				 cmd->device ? cmd->device->id : 0xffff,
4237 				 cmd->device ? cmd->device->lun : 0xffff,
4238 				 lpfc_cmd->status, lpfc_cmd->result,
4239 				 vport->fc_myDID,
4240 				 (ndlp) ? ndlp->nlp_DID : 0,
4241 				 lpfc_cmd->cur_iocbq.sli4_xritag,
4242 				 wcqe->parameter,
4243 				 wcqe->total_data_placed,
4244 				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4245 		break;
4246 	case IOSTAT_REMOTE_STOP:
4247 		if (ndlp) {
4248 			/* This I/O was aborted by the target. We don't
4249 			 * know the rxid, and because we did not send the
4250 			 * ABTS we cannot generate an RRQ.
4251 			 */
4252 			lpfc_set_rrq_active(phba, ndlp,
4253 					    lpfc_cmd->cur_iocbq.sli4_lxritag,
4254 					    0, 0);
4255 		}
4256 		fallthrough;
4257 	case IOSTAT_LOCAL_REJECT:
4258 		if (lpfc_cmd->result & IOERR_DRVR_MASK)
4259 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4260 		if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4261 		    lpfc_cmd->result ==
4262 		    IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4263 		    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4264 		    lpfc_cmd->result ==
4265 		    IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4266 			cmd->result = DID_NO_CONNECT << 16;
4267 			break;
4268 		}
4269 		if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4270 		    lpfc_cmd->result == IOERR_LINK_DOWN ||
4271 		    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4272 		    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4273 		    lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
4274 		    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4275 			cmd->result = DID_REQUEUE << 16;
4276 			break;
4277 		}
4278 		if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4279 		     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4280 		     status == CQE_STATUS_DI_ERROR) {
4281 			if (scsi_get_prot_op(cmd) !=
4282 			    SCSI_PROT_NORMAL) {
4283 				/*
4284 				 * This is a response for a BG enabled
4285 				 * cmd. Parse BG error
4286 				 */
4287 				lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4288 				break;
4289 			} else {
4290 				lpfc_printf_vlog(vport, KERN_WARNING,
4291 						 LOG_BG,
4292 						 "9040 non-zero BGSTAT "
4293 						 "on unprotected cmd\n");
4294 			}
4295 		}
4296 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4297 				 "9036 Local Reject FCP cmd x%x failed"
4298 				 " <%d/%lld> "
4299 				 "status: x%x result: x%x "
4300 				 "sid: x%x did: x%x oxid: x%x "
4301 				 "Data: x%x x%x x%x\n",
4302 				 cmd->cmnd[0],
4303 				 cmd->device ? cmd->device->id : 0xffff,
4304 				 cmd->device ? cmd->device->lun : 0xffff,
4305 				 lpfc_cmd->status, lpfc_cmd->result,
4306 				 vport->fc_myDID,
4307 				 (ndlp) ? ndlp->nlp_DID : 0,
4308 				 lpfc_cmd->cur_iocbq.sli4_xritag,
4309 				 wcqe->parameter,
4310 				 wcqe->total_data_placed,
4311 				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4312 		fallthrough;
4313 	default:
4314 		if (lpfc_cmd->status >= IOSTAT_CNT)
4315 			lpfc_cmd->status = IOSTAT_DEFAULT;
4316 		cmd->result = DID_ERROR << 16;
4317 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4318 				 "9037 FCP Completion Error: xri %x "
4319 				 "status x%x result x%x [x%x] "
4320 				 "placed x%x\n",
4321 				 lpfc_cmd->cur_iocbq.sli4_xritag,
4322 				 lpfc_cmd->status, lpfc_cmd->result,
4323 				 wcqe->parameter,
4324 				 wcqe->total_data_placed);
4325 	}
4326 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4327 		u32 *lp = (u32 *)cmd->sense_buffer;
4328 
4329 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4330 				 "9039 Iodone <%d/%llu> cmd x%px, error "
4331 				 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
4332 				 cmd->device->id, cmd->device->lun, cmd,
4333 				 cmd->result, *lp, *(lp + 3),
4334 				 (u64)scsi_get_lba(cmd),
4335 				 cmd->retries, scsi_get_resid(cmd));
4336 	}
4337 
4338 	lpfc_update_stats(vport, lpfc_cmd);
4339 
4340 	if (vport->cfg_max_scsicmpl_time &&
4341 	    time_after(jiffies, lpfc_cmd->start_time +
4342 	    msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4343 		spin_lock_irqsave(shost->host_lock, flags);
4344 		if (ndlp) {
4345 			if (ndlp->cmd_qdepth >
4346 				atomic_read(&ndlp->cmd_pending) &&
4347 				(atomic_read(&ndlp->cmd_pending) >
4348 				LPFC_MIN_TGT_QDEPTH) &&
4349 				(cmd->cmnd[0] == READ_10 ||
4350 				cmd->cmnd[0] == WRITE_10))
4351 				ndlp->cmd_qdepth =
4352 					atomic_read(&ndlp->cmd_pending);
4353 
4354 			ndlp->last_change_time = jiffies;
4355 		}
4356 		spin_unlock_irqrestore(shost->host_lock, flags);
4357 	}
4358 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4359 
4360 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4361 	if (lpfc_cmd->ts_cmd_start) {
4362 		lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4363 		lpfc_cmd->ts_data_io = ktime_get_ns();
4364 		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4365 		lpfc_io_ktime(phba, lpfc_cmd);
4366 	}
4367 #endif
4368 	if (likely(!wait_xb_clr))
4369 		lpfc_cmd->pCmd = NULL;
4370 	spin_unlock(&lpfc_cmd->buf_lock);
4371 
4372 	/* Check if IO qualified for CMF */
4373 	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4374 	    cmd->sc_data_direction == DMA_FROM_DEVICE &&
4375 	    (scsi_sg_count(cmd))) {
4376 		/* Used when calculating average latency */
4377 		lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
4378 		lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4379 	}
4380 
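	/*
	 * XB is still set and the driver is configured to wait for the abort
	 * response, so skip scsi_done() here and just release the I/O buffer.
	 */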
4381 	if (wait_xb_clr)
4382 		goto out;
4383 
4384 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4385 	scsi_done(cmd);
4386 
4387 	/*
4388 	 * If there is an abort thread waiting for command completion
4389 	 * wake up the thread.
4390 	 */
4391 	spin_lock(&lpfc_cmd->buf_lock);
4392 	lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4393 	if (lpfc_cmd->waitq)
4394 		wake_up(lpfc_cmd->waitq);
4395 	spin_unlock(&lpfc_cmd->buf_lock);
4396 out:
4397 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4398 }
4399 
4400 /**
4401  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4402  * @phba: The Hba for which this call is being executed.
4403  * @pIocbIn: The command IOCBQ for the scsi cmnd.
4404  * @pIocbOut: The response IOCBQ for the scsi cmnd.
4405  *
4406  * This routine sets the scsi command result based on the status field of
4407  * the response IOCB. It also handles the QUEUE FULL condition by ramping
4408  * down the device queue depth.
4409  **/
4410 static void
4411 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4412 			struct lpfc_iocbq *pIocbOut)
4413 {
4414 	struct lpfc_io_buf *lpfc_cmd =
4415 		(struct lpfc_io_buf *) pIocbIn->io_buf;
4416 	struct lpfc_vport      *vport = pIocbIn->vport;
4417 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4418 	struct lpfc_nodelist *pnode = rdata->pnode;
4419 	struct scsi_cmnd *cmd;
4420 	unsigned long flags;
4421 	struct lpfc_fast_path_event *fast_path_evt;
4422 	struct Scsi_Host *shost;
4423 	int idx;
4424 	uint32_t logit = LOG_FCP;
4425 
4426 	/* Guard against abort handler being called at same time */
4427 	spin_lock(&lpfc_cmd->buf_lock);
4428 
4429 	/* Sanity check on return of outstanding command */
4430 	cmd = lpfc_cmd->pCmd;
4431 	if (!cmd || !phba) {
4432 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4433 				 "2621 IO completion: Not an active IO\n");
4434 		spin_unlock(&lpfc_cmd->buf_lock);
4435 		return;
4436 	}
4437 
4438 	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4439 	if (phba->sli4_hba.hdwq)
4440 		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4441 
4442 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4443 	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4444 		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4445 #endif
4446 	shost = cmd->device->host;
4447 
4448 	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4449 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4450 	/* pick up SLI4 exchange busy status from HBA */
4451 	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4452 	if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
4453 		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4454 
4455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4456 	if (lpfc_cmd->prot_data_type) {
4457 		struct scsi_dif_tuple *src = NULL;
4458 
4459 		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4460 		/*
4461 		 * Used to restore any changes to protection
4462 		 * data for error injection.
4463 		 */
4464 		switch (lpfc_cmd->prot_data_type) {
4465 		case LPFC_INJERR_REFTAG:
4466 			src->ref_tag =
4467 				lpfc_cmd->prot_data;
4468 			break;
4469 		case LPFC_INJERR_APPTAG:
4470 			src->app_tag =
4471 				(uint16_t)lpfc_cmd->prot_data;
4472 			break;
4473 		case LPFC_INJERR_GUARD:
4474 			src->guard_tag =
4475 				(uint16_t)lpfc_cmd->prot_data;
4476 			break;
4477 		default:
4478 			break;
4479 		}
4480 
4481 		lpfc_cmd->prot_data = 0;
4482 		lpfc_cmd->prot_data_type = 0;
4483 		lpfc_cmd->prot_data_segment = NULL;
4484 	}
4485 #endif
4486 
4487 	if (unlikely(lpfc_cmd->status)) {
4488 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4489 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
4490 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4491 		else if (lpfc_cmd->status >= IOSTAT_CNT)
4492 			lpfc_cmd->status = IOSTAT_DEFAULT;
4493 		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4494 		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4495 		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4496 		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4497 			logit = 0;
4498 		else
4499 			logit = LOG_FCP | LOG_FCP_UNDER;
4500 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4501 			 "9030 FCP cmd x%x failed <%d/%lld> "
4502 			 "status: x%x result: x%x "
4503 			 "sid: x%x did: x%x oxid: x%x "
4504 			 "Data: x%x x%x\n",
4505 			 cmd->cmnd[0],
4506 			 cmd->device ? cmd->device->id : 0xffff,
4507 			 cmd->device ? cmd->device->lun : 0xffff,
4508 			 lpfc_cmd->status, lpfc_cmd->result,
4509 			 vport->fc_myDID,
4510 			 (pnode) ? pnode->nlp_DID : 0,
4511 			 phba->sli_rev == LPFC_SLI_REV4 ?
4512 			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4513 			 pIocbOut->iocb.ulpContext,
4514 			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4515 
4516 		switch (lpfc_cmd->status) {
4517 		case IOSTAT_FCP_RSP_ERROR:
4518 			/* Call FCP RSP handler to determine result */
4519 			lpfc_handle_fcp_err(vport, lpfc_cmd,
4520 					    pIocbOut->iocb.un.fcpi.fcpi_parm);
4521 			break;
4522 		case IOSTAT_NPORT_BSY:
4523 		case IOSTAT_FABRIC_BSY:
4524 			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4525 			fast_path_evt = lpfc_alloc_fast_evt(phba);
4526 			if (!fast_path_evt)
4527 				break;
4528 			fast_path_evt->un.fabric_evt.event_type =
4529 				FC_REG_FABRIC_EVENT;
4530 			fast_path_evt->un.fabric_evt.subcategory =
4531 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4532 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4533 			if (pnode) {
4534 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4535 					&pnode->nlp_portname,
4536 					sizeof(struct lpfc_name));
4537 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4538 					&pnode->nlp_nodename,
4539 					sizeof(struct lpfc_name));
4540 			}
4541 			fast_path_evt->vport = vport;
4542 			fast_path_evt->work_evt.evt =
4543 				LPFC_EVT_FASTPATH_MGMT_EVT;
4544 			spin_lock_irqsave(&phba->hbalock, flags);
4545 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
4546 				&phba->work_list);
4547 			spin_unlock_irqrestore(&phba->hbalock, flags);
4548 			lpfc_worker_wake_up(phba);
4549 			break;
4550 		case IOSTAT_LOCAL_REJECT:
4551 		case IOSTAT_REMOTE_STOP:
4552 			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4553 			    lpfc_cmd->result ==
4554 					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4555 			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4556 			    lpfc_cmd->result ==
4557 					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4558 				cmd->result = DID_NO_CONNECT << 16;
4559 				break;
4560 			}
4561 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4562 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4563 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4564 			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4565 				cmd->result = DID_REQUEUE << 16;
4566 				break;
4567 			}
4568 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4569 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4570 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4571 				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4572 					/*
4573 					 * This is a response for a BG enabled
4574 					 * cmd. Parse BG error
4575 					 */
4576 					lpfc_parse_bg_err(phba, lpfc_cmd,
4577 							pIocbOut);
4578 					break;
4579 				} else {
4580 					lpfc_printf_vlog(vport, KERN_WARNING,
4581 							LOG_BG,
4582 							"9031 non-zero BGSTAT "
4583 							"on unprotected cmd\n");
4584 				}
4585 			}
4586 			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4587 				&& (phba->sli_rev == LPFC_SLI_REV4)
4588 				&& pnode) {
4589 				/* This IO was aborted by the target. We don't
4590 				 * know the rxid, and because we did not send the
4591 				 * ABTS we cannot generate an RRQ.
4592 				 */
4593 				lpfc_set_rrq_active(phba, pnode,
4594 					lpfc_cmd->cur_iocbq.sli4_lxritag,
4595 					0, 0);
4596 			}
4597 			fallthrough;
4598 		default:
4599 			cmd->result = DID_ERROR << 16;
4600 			break;
4601 		}
4602 
4603 		if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4604 			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4605 				      SAM_STAT_BUSY;
4606 	} else
4607 		cmd->result = DID_OK << 16;
4608 
4609 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4610 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4611 
4612 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4613 				 "0710 Iodone <%d/%llu> cmd x%px, error "
4614 				 "x%x SNS x%x x%x Data: x%x x%x\n",
4615 				 cmd->device->id, cmd->device->lun, cmd,
4616 				 cmd->result, *lp, *(lp + 3), cmd->retries,
4617 				 scsi_get_resid(cmd));
4618 	}
4619 
4620 	lpfc_update_stats(vport, lpfc_cmd);
4621 	if (vport->cfg_max_scsicmpl_time &&
4622 	   time_after(jiffies, lpfc_cmd->start_time +
4623 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4624 		spin_lock_irqsave(shost->host_lock, flags);
4625 		if (pnode) {
4626 			if (pnode->cmd_qdepth >
4627 				atomic_read(&pnode->cmd_pending) &&
4628 				(atomic_read(&pnode->cmd_pending) >
4629 				LPFC_MIN_TGT_QDEPTH) &&
4630 				((cmd->cmnd[0] == READ_10) ||
4631 				(cmd->cmnd[0] == WRITE_10)))
4632 				pnode->cmd_qdepth =
4633 					atomic_read(&pnode->cmd_pending);
4634 
4635 			pnode->last_change_time = jiffies;
4636 		}
4637 		spin_unlock_irqrestore(shost->host_lock, flags);
4638 	}
4639 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4640 
4641 	lpfc_cmd->pCmd = NULL;
4642 	spin_unlock(&lpfc_cmd->buf_lock);
4643 
4644 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4645 	if (lpfc_cmd->ts_cmd_start) {
4646 		lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4647 		lpfc_cmd->ts_data_io = ktime_get_ns();
4648 		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4649 		lpfc_io_ktime(phba, lpfc_cmd);
4650 	}
4651 #endif
4652 
4653 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4654 	scsi_done(cmd);
4655 
4656 	/*
4657 	 * If there is an abort thread waiting for command completion
4658 	 * wake up the thread.
4659 	 */
4660 	spin_lock(&lpfc_cmd->buf_lock);
4661 	lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4662 	if (lpfc_cmd->waitq)
4663 		wake_up(lpfc_cmd->waitq);
4664 	spin_unlock(&lpfc_cmd->buf_lock);
4665 
4666 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4667 }
4668 
4669 /**
4670  * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4671  * @vport: Pointer to vport object.
4672  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4673  * @tmo: timeout value for the IO
4674  *
4675  * Based on the data-direction of the command, initialize IOCB
4676  * in the I/O buffer. Fill in the IOCB fields which are independent
4677  * of the scsi buffer
4678  *
4679  * RETURNS 0 - SUCCESS,
4680  **/
4681 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4682 				      struct lpfc_io_buf *lpfc_cmd,
4683 				      uint8_t tmo)
4684 {
4685 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4686 	struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4687 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4688 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4689 	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4690 	int datadir = scsi_cmnd->sc_data_direction;
4691 	u32 fcpdl;
4692 
4693 	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4694 
4695 	/*
4696 	 * There are three possibilities here - use scatter-gather segment, use
4697 	 * the single mapping, or neither.  Start the lpfc command prep by
4698 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4699 	 * data bde entry.
4700 	 */
4701 	if (scsi_sg_count(scsi_cmnd)) {
4702 		if (datadir == DMA_TO_DEVICE) {
4703 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4704 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4705 			if (vport->cfg_first_burst_size &&
4706 			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
4707 				u32 xrdy_len;
4708 
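				/*
				 * First burst was negotiated: fcpi_XRdy holds the
				 * number of bytes the initiator may send before the
				 * target's first XFER_RDY.
				 */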
4709 				fcpdl = scsi_bufflen(scsi_cmnd);
4710 				xrdy_len = min(fcpdl,
4711 					       vport->cfg_first_burst_size);
4712 				piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4713 			}
4714 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4715 		} else {
4716 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4717 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4718 			fcp_cmnd->fcpCntl3 = READ_DATA;
4719 		}
4720 	} else {
4721 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4722 		iocb_cmd->un.fcpi.fcpi_parm = 0;
4723 		iocb_cmd->ulpPU = 0;
4724 		fcp_cmnd->fcpCntl3 = 0;
4725 	}
4726 
4727 	/*
4728 	 * Finish initializing those IOCB fields that are independent
4729 	 * of the scsi_cmnd request_buffer
4730 	 */
4731 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
4732 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4733 		piocbq->iocb.ulpFCP2Rcvy = 1;
4734 	else
4735 		piocbq->iocb.ulpFCP2Rcvy = 0;
4736 
4737 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4738 	piocbq->io_buf  = lpfc_cmd;
4739 	if (!piocbq->cmd_cmpl)
4740 		piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4741 	piocbq->iocb.ulpTimeout = tmo;
4742 	piocbq->vport = vport;
4743 	return 0;
4744 }
4745 
4746 /**
4747  * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4748  * @vport: Pointer to vport object.
4749  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4750  * @tmo: timeout value for the IO
4751  *
4752  * Based on the data-direction of the command, copy the WQE template
4753  * to I/O buffer WQE. Fill in the WQE fields which are independent
4754  * of the scsi buffer
4755  *
4756  * RETURNS 0 - SUCCESS,
4757  **/
4758 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4759 				      struct lpfc_io_buf *lpfc_cmd,
4760 				      uint8_t tmo)
4761 {
4762 	struct lpfc_hba *phba = vport->phba;
4763 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4764 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4765 	struct lpfc_sli4_hdw_queue *hdwq = NULL;
4766 	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4767 	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4768 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
4769 	u16 idx = lpfc_cmd->hdwq_no;
4770 	int datadir = scsi_cmnd->sc_data_direction;
4771 
4772 	hdwq = &phba->sli4_hba.hdwq[idx];
4773 
4774 	/* Initialize 64 bytes only */
4775 	memset(wqe, 0, sizeof(union lpfc_wqe128));
4776 
4777 	/*
4778 	 * There are three possibilities here - use scatter-gather segment, use
4779 	 * the single mapping, or neither.
4780 	 */
4781 	if (scsi_sg_count(scsi_cmnd)) {
4782 		if (datadir == DMA_TO_DEVICE) {
4783 			/* From the iwrite template, initialize words 7 -  11 */
4784 			memcpy(&wqe->words[7],
4785 			       &lpfc_iwrite_cmd_template.words[7],
4786 			       sizeof(uint32_t) * 5);
4787 
4788 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4789 			if (hdwq)
4790 				hdwq->scsi_cstat.output_requests++;
4791 		} else {
4792 			/* From the iread template, initialize words 7 - 11 */
4793 			memcpy(&wqe->words[7],
4794 			       &lpfc_iread_cmd_template.words[7],
4795 			       sizeof(uint32_t) * 5);
4796 
4797 			/* Word 7 */
4798 			bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4799 
4800 			fcp_cmnd->fcpCntl3 = READ_DATA;
4801 			if (hdwq)
4802 				hdwq->scsi_cstat.input_requests++;
4803 
4804 			/* For a CMF Managed port, iod must be zero'ed */
4805 			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4806 				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4807 				       LPFC_WQE_IOD_NONE);
4808 		}
4809 	} else {
4810 		/* From the icmnd template, initialize words 4 - 11 */
4811 		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4812 		       sizeof(uint32_t) * 8);
4813 
4814 		/* Word 7 */
4815 		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4816 
4817 		fcp_cmnd->fcpCntl3 = 0;
4818 		if (hdwq)
4819 			hdwq->scsi_cstat.control_requests++;
4820 	}
4821 
4822 	/*
4823 	 * Finish initializing those WQE fields that are independent
4824 	 * of the request_buffer
4825 	 */
4826 
4827 	 /* Word 3 */
4828 	bf_set(payload_offset_len, &wqe->fcp_icmd,
4829 	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4830 
4831 	/* Word 6 */
4832 	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4833 	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4834 	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4835 
4836 	/* Word 7*/
4837 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4838 		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4839 
4840 	bf_set(wqe_class, &wqe->generic.wqe_com,
4841 	       (pnode->nlp_fcp_info & 0x0f));
4842 
4843 	 /* Word 8 */
4844 	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4845 
4846 	/* Word 9 */
4847 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4848 
4849 	pwqeq->vport = vport;
4850 	pwqeq->io_buf = lpfc_cmd;
4851 	pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4852 	pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4853 
4854 	return 0;
4855 }
4856 
4857 /**
4858  * lpfc_scsi_prep_cmnd - Wrapper function to convert scsi cmnd to FCP info unit
4859  * @vport: The virtual port for which this call is being executed.
4860  * @lpfc_cmd: The scsi buffer containing the command to be sent.
4861  * @pnode: Pointer to lpfc_nodelist.
4862  *
4863  * This routine initializes the fcp_cmnd and IOCB/WQE data structures
4864  * from the scsi command that is to be sent to the device.
4865  **/
4866 static int
4867 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4868 		    struct lpfc_nodelist *pnode)
4869 {
4870 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4871 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4872 	u8 *ptr;
4873 
4874 	if (!pnode)
4875 		return 0;
4876 
4877 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4878 	/* clear task management bits */
4879 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4880 
4881 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4882 		       &lpfc_cmd->fcp_cmnd->fcp_lun);
4883 
4884 	ptr = &fcp_cmnd->fcpCdb[0];
4885 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4886 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4887 		ptr += scsi_cmnd->cmd_len;
4888 		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4889 	}
4890 
4891 	fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4892 
4893 	lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4894 
4895 	return 0;
4896 }
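
/*
 * Illustrative sketch only (not driver code): lpfc_scsi_prep_cmnd() above
 * copies the midlayer CDB into the FCP_CMND IU and zero-pads the remainder.
 * For a 6-byte CDB such as TEST UNIT READY the effect is roughly:
 *
 *	memcpy(fcp_cmnd->fcpCdb, scsi_cmnd->cmnd, 6);
 *	memset(&fcp_cmnd->fcpCdb[6], 0, LPFC_FCP_CDB_LEN - 6);
 *
 * so any unused CDB bytes sent on the wire are always zero, independent of
 * the previous contents of the buffer.
 */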
4897 
4898 /**
4899  * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
4900  * @vport: The virtual port for which this call is being executed.
4901  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4902  * @lun: Logical unit number.
4903  * @task_mgmt_cmd: SCSI task management command.
4904  *
4905  * This routine creates an FCP information unit corresponding to @task_mgmt_cmd
4906  * for device with SLI-3 interface spec.
4907  *
4908  * Return codes:
4909  *   0 - Error
4910  *   1 - Success
4911  **/
4912 static int
4913 lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
4914 				struct lpfc_io_buf *lpfc_cmd,
4915 				u64 lun, u8 task_mgmt_cmd)
4916 {
4917 	struct lpfc_iocbq *piocbq;
4918 	IOCB_t *piocb;
4919 	struct fcp_cmnd *fcp_cmnd;
4920 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4921 	struct lpfc_nodelist *ndlp = rdata->pnode;
4922 
4923 	if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4924 		return 0;
4925 
4926 	piocbq = &(lpfc_cmd->cur_iocbq);
4927 	piocbq->vport = vport;
4928 
4929 	piocb = &piocbq->iocb;
4930 
4931 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4932 	/* Clear out any old data in the FCP command area */
4933 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4934 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4935 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4936 	if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4937 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4938 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4939 	piocb->ulpContext = ndlp->nlp_rpi;
4940 	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4941 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4942 	piocb->ulpPU = 0;
4943 	piocb->un.fcpi.fcpi_parm = 0;
4944 
4945 	/* ulpTimeout is only one byte */
4946 	if (lpfc_cmd->timeout > 0xff) {
4947 		/*
4948 		 * Do not timeout the command at the firmware level.
4949 		 * The driver will provide the timeout mechanism.
4950 		 */
4951 		piocb->ulpTimeout = 0;
4952 	} else
4953 		piocb->ulpTimeout = lpfc_cmd->timeout;
4954 
4955 	return 1;
4956 }
4957 
4958 /**
4959  * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
4960  * @vport: The virtual port for which this call is being executed.
4961  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4962  * @lun: Logical unit number.
4963  * @task_mgmt_cmd: SCSI task management command.
4964  *
4965  * This routine creates an FCP information unit corresponding to @task_mgmt_cmd
4966  * for device with SLI-4 interface spec.
4967  *
4968  * Return codes:
4969  *   0 - Error
4970  *   1 - Success
4971  **/
4972 static int
4973 lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
4974 				struct lpfc_io_buf *lpfc_cmd,
4975 				u64 lun, u8 task_mgmt_cmd)
4976 {
4977 	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4978 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
4979 	struct fcp_cmnd *fcp_cmnd;
4980 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4981 	struct lpfc_nodelist *ndlp = rdata->pnode;
4982 
4983 	if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4984 		return 0;
4985 
4986 	pwqeq->vport = vport;
4987 	/* Initialize 64 bytes only */
4988 	memset(wqe, 0, sizeof(union lpfc_wqe128));
4989 
4990 	/* From the icmnd template, initialize words 4 - 11 */
4991 	memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4992 	       sizeof(uint32_t) * 8);
4993 
4994 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4995 	/* Clear out any old data in the FCP command area */
4996 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4997 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4998 	fcp_cmnd->fcpCntl3 = 0;
4999 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
5000 
5001 	bf_set(payload_offset_len, &wqe->fcp_icmd,
5002 	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
5003 	bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
5004 	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,  /* ulpContext */
5005 	       vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
5006 	bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
5007 	       ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
5008 	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
5009 	       (ndlp->nlp_fcp_info & 0x0f));
5010 
5011 	/* ulpTimeout is only one byte */
5012 	if (lpfc_cmd->timeout > 0xff) {
5013 		/*
5014 		 * Do not timeout the command at the firmware level.
5015 		 * The driver will provide the timeout mechanism.
5016 		 */
5017 		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
5018 	} else {
5019 		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
5020 	}
5021 
5022 	lpfc_prep_embed_io(vport->phba, lpfc_cmd);
5023 	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
5024 	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
5025 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
5026 
5027 	lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
5028 
5029 	return 1;
5030 }
5031 
5032 /**
5033  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
5034  * @phba: The hba struct for which this call is being executed.
5035  * @dev_grp: The HBA PCI-Device group number.
5036  *
5037  * This routine sets up the SCSI interface API function jump table in @phba
5038  * struct.
5039  * Returns: 0 - success, -ENODEV - failure.
5040  **/
5041 int
5042 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5043 {
5044 
5045 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
5046 
5047 	switch (dev_grp) {
5048 	case LPFC_PCI_DEV_LP:
5049 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
5050 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
5051 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
5052 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
5053 		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
5054 		phba->lpfc_scsi_prep_task_mgmt_cmd =
5055 					lpfc_scsi_prep_task_mgmt_cmd_s3;
5056 		break;
5057 	case LPFC_PCI_DEV_OC:
5058 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
5059 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
5060 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
5061 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
5062 		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
5063 		phba->lpfc_scsi_prep_task_mgmt_cmd =
5064 					lpfc_scsi_prep_task_mgmt_cmd_s4;
5065 		break;
5066 	default:
5067 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5068 				"1418 Invalid HBA PCI-device group: 0x%x\n",
5069 				dev_grp);
5070 		return -ENODEV;
5071 	}
5072 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5073 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
5074 	return 0;
5075 }
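
/*
 * Illustrative sketch only (not driver code): once lpfc_scsi_api_table_setup()
 * has populated the jump table, callers remain SLI-revision agnostic and
 * dispatch through the function pointers rather than calling the _s3/_s4
 * variants directly, e.g.:
 *
 *	rc = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *	status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
 *						     lun_id, task_mgmt_cmd);
 *
 * lpfc_send_taskmgmt() below uses exactly this indirection for the task
 * management path.
 */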
5076 
5077 /**
5078  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
5079  * @phba: The Hba for which this call is being executed.
5080  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5081  * @rspiocbq: Pointer to lpfc_iocbq data structure.
5082  *
5083  * This routine is the IOCB completion routine for the device reset and
5084  * target reset handlers. It releases the scsi buffer associated with @lpfc_cmd.
5085  **/
5086 static void
5087 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5088 			struct lpfc_iocbq *cmdiocbq,
5089 			struct lpfc_iocbq *rspiocbq)
5090 {
5091 	struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
5092 	if (lpfc_cmd)
5093 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5094 	return;
5095 }
5096 
5097 /**
5098  * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5099  *                             if issuing a pci_bus_reset is possibly unsafe
5100  * @phba: lpfc_hba pointer.
5101  *
5102  * Description:
5103  * Walks the bus_list to ensure that only PCI devices with the Emulex
5104  * vendor id, device ids that support hot reset, and a single occurrence
5105  * of function 0 are present.
5106  *
5107  * Returns:
5108  * -EBADSLT,  detected invalid device
5109  *      0,    successful
5110  */
5111 int
5112 lpfc_check_pci_resettable(struct lpfc_hba *phba)
5113 {
5114 	const struct pci_dev *pdev = phba->pcidev;
5115 	struct pci_dev *ptr = NULL;
5116 	u8 counter = 0;
5117 
5118 	/* Walk the list of devices on the pci_dev's bus */
5119 	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5120 		/* Check for Emulex Vendor ID */
5121 		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5122 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5123 					"8346 Non-Emulex vendor found: "
5124 					"0x%04x\n", ptr->vendor);
5125 			return -EBADSLT;
5126 		}
5127 
5128 		/* Check for valid Emulex Device ID */
5129 		if (phba->sli_rev != LPFC_SLI_REV4 ||
5130 		    phba->hba_flag & HBA_FCOE_MODE) {
5131 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5132 					"8347 Incapable PCI reset device: "
5133 					"0x%04x\n", ptr->device);
5134 			return -EBADSLT;
5135 		}
5136 
5137 		/* Check for only one function 0 ID to ensure only one HBA on
5138 		 * secondary bus
5139 		 */
5140 		if (ptr->devfn == 0) {
5141 			if (++counter > 1) {
5142 				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5143 						"8348 More than one device on "
5144 						"secondary bus found\n");
5145 				return -EBADSLT;
5146 			}
5147 		}
5148 	}
5149 
5150 	return 0;
5151 }
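
/*
 * Illustrative usage (see lpfc_info() below): a zero return from
 * lpfc_check_pci_resettable() is treated as "safe to reset", e.g.:
 *
 *	if (!lpfc_check_pci_resettable(phba))
 *		strlcat(lpfcinfobuf, " PCI resettable", sizeof(lpfcinfobuf));
 */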
5152 
5153 /**
5154  * lpfc_info - Info entry point of scsi_host_template data structure
5155  * @host: The scsi host for which this call is being executed.
5156  *
5157  * This routine provides module information about the hba.
5158  *
5159  * Return code:
5160  *   Pointer to char - Success.
5161  **/
5162 const char *
5163 lpfc_info(struct Scsi_Host *host)
5164 {
5165 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5166 	struct lpfc_hba   *phba = vport->phba;
5167 	int link_speed = 0;
5168 	static char lpfcinfobuf[384];
5169 	char tmp[384] = {0};
5170 
5171 	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5172 	if (phba && phba->pcidev){
5173 		/* Model Description */
5174 		scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5175 		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5176 		    sizeof(lpfcinfobuf))
5177 			goto buffer_done;
5178 
5179 		/* PCI Info */
5180 		scnprintf(tmp, sizeof(tmp),
5181 			  " on PCI bus %02x device %02x irq %d",
5182 			  phba->pcidev->bus->number, phba->pcidev->devfn,
5183 			  phba->pcidev->irq);
5184 		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5185 		    sizeof(lpfcinfobuf))
5186 			goto buffer_done;
5187 
5188 		/* Port Number */
5189 		if (phba->Port[0]) {
5190 			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5191 			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5192 			    sizeof(lpfcinfobuf))
5193 				goto buffer_done;
5194 		}
5195 
5196 		/* Link Speed */
5197 		link_speed = lpfc_sli_port_speed_get(phba);
5198 		if (link_speed != 0) {
5199 			scnprintf(tmp, sizeof(tmp),
5200 				  " Logical Link Speed: %d Mbps", link_speed);
5201 			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5202 			    sizeof(lpfcinfobuf))
5203 				goto buffer_done;
5204 		}
5205 
5206 		/* PCI resettable */
5207 		if (!lpfc_check_pci_resettable(phba)) {
5208 			scnprintf(tmp, sizeof(tmp), " PCI resettable");
5209 			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5210 		}
5211 	}
5212 
5213 buffer_done:
5214 	return lpfcinfobuf;
5215 }
5216 
5217 /**
5218  * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
5219  * @phba: The Hba for which this call is being executed.
5220  *
5221  * This routine rearms the fcp_poll_timer of @phba using cfg_poll_tmo.
5222  * The default value of cfg_poll_tmo is 10 milliseconds.
5223  **/
5224 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
5225 {
5226 	unsigned long  poll_tmo_expires =
5227 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5228 
5229 	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5230 		mod_timer(&phba->fcp_poll_timer,
5231 			  poll_tmo_expires);
5232 }
5233 
5234 /**
5235  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5236  * @phba: The Hba for which this call is being executed.
5237  *
5238  * This routine starts the fcp_poll_timer of @phba.
5239  **/
5240 void lpfc_poll_start_timer(struct lpfc_hba *phba)
5241 {
5242 	lpfc_poll_rearm_timer(phba);
5243 }
5244 
5245 /**
5246  * lpfc_poll_timeout - Restart polling timer
5247  * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5248  *
5249  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
5250  * and the FCP ring interrupt is disabled.
5251  **/
5252 void lpfc_poll_timeout(struct timer_list *t)
5253 {
5254 	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5255 
5256 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5257 		lpfc_sli_handle_fast_ring_event(phba,
5258 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5259 
5260 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5261 			lpfc_poll_rearm_timer(phba);
5262 	}
5263 }
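
/*
 * Illustrative sketch only, with assumed setup code: the fcp_poll_timer is
 * expected to be initialized during adapter bring-up (elsewhere in the
 * driver) along the lines of:
 *
 *	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
 *	...
 *	lpfc_poll_start_timer(phba);
 *
 * after which lpfc_poll_timeout() and lpfc_poll_rearm_timer() keep the timer
 * running for as long as the SLI3 FCP txcmplq has outstanding commands.
 */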
5264 
5265 /*
5266  * lpfc_is_command_vm_io - get the UUID from blk cgroup
5267  * @cmd: Pointer to scsi_cmnd data structure
5268  * Returns UUID if present, otherwise NULL
5269  */
5270 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5271 {
5272 	struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5273 
5274 	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
5275 		return NULL;
5276 	return blkcg_get_fc_appid(bio);
5277 }
5278 
5279 /**
5280  * lpfc_queuecommand - scsi_host_template queuecommand entry point
5281  * @shost: kernel scsi host pointer.
5282  * @cmnd: Pointer to scsi_cmnd data structure.
5283  *
5284  * The driver registers this routine with the scsi midlayer to submit a @cmnd
5285  * for processing. This routine prepares an IOCB from the scsi command and
5286  * provides it to the firmware; the completion callback is invoked when done.
5287  *
5288  * Return value :
5289  *   0 - Success
5290  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5291  **/
5292 static int
5293 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5294 {
5295 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5296 	struct lpfc_hba   *phba = vport->phba;
5297 	struct lpfc_iocbq *cur_iocbq = NULL;
5298 	struct lpfc_rport_data *rdata;
5299 	struct lpfc_nodelist *ndlp;
5300 	struct lpfc_io_buf *lpfc_cmd;
5301 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5302 	int err, idx;
5303 	u8 *uuid = NULL;
5304 	uint64_t start;
5305 
5306 	start = ktime_get_ns();
5307 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5308 
5309 	/* sanity check on references */
5310 	if (unlikely(!rdata) || unlikely(!rport))
5311 		goto out_fail_command;
5312 
5313 	err = fc_remote_port_chkready(rport);
5314 	if (err) {
5315 		cmnd->result = err;
5316 		goto out_fail_command;
5317 	}
5318 	ndlp = rdata->pnode;
5319 
5320 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5321 		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5322 
5323 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5324 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5325 				" op:%02x str=%s without registering for"
5326 				" BlockGuard - Rejecting command\n",
5327 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5328 				dif_op_str[scsi_get_prot_op(cmnd)]);
5329 		goto out_fail_command;
5330 	}
5331 
5332 	/*
5333 	 * Catch race where our node has transitioned, but the
5334 	 * transport is still transitioning.
5335 	 */
5336 	if (!ndlp)
5337 		goto out_tgt_busy1;
5338 
5339 	/* Check if IO qualifies for CMF */
5340 	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5341 	    cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5342 	    (scsi_sg_count(cmnd))) {
5343 		/* Latency start time saved in rx_cmd_start later in routine */
5344 		err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5345 		if (err)
5346 			goto out_tgt_busy1;
5347 	}
5348 
5349 	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5350 		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5351 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5352 					 "3377 Target Queue Full, scsi Id:%d "
5353 					 "Qdepth:%d Pending command:%d"
5354 					 " WWNN:%02x:%02x:%02x:%02x:"
5355 					 "%02x:%02x:%02x:%02x, "
5356 					 " WWPN:%02x:%02x:%02x:%02x:"
5357 					 "%02x:%02x:%02x:%02x",
5358 					 ndlp->nlp_sid, ndlp->cmd_qdepth,
5359 					 atomic_read(&ndlp->cmd_pending),
5360 					 ndlp->nlp_nodename.u.wwn[0],
5361 					 ndlp->nlp_nodename.u.wwn[1],
5362 					 ndlp->nlp_nodename.u.wwn[2],
5363 					 ndlp->nlp_nodename.u.wwn[3],
5364 					 ndlp->nlp_nodename.u.wwn[4],
5365 					 ndlp->nlp_nodename.u.wwn[5],
5366 					 ndlp->nlp_nodename.u.wwn[6],
5367 					 ndlp->nlp_nodename.u.wwn[7],
5368 					 ndlp->nlp_portname.u.wwn[0],
5369 					 ndlp->nlp_portname.u.wwn[1],
5370 					 ndlp->nlp_portname.u.wwn[2],
5371 					 ndlp->nlp_portname.u.wwn[3],
5372 					 ndlp->nlp_portname.u.wwn[4],
5373 					 ndlp->nlp_portname.u.wwn[5],
5374 					 ndlp->nlp_portname.u.wwn[6],
5375 					 ndlp->nlp_portname.u.wwn[7]);
5376 			goto out_tgt_busy2;
5377 		}
5378 	}
5379 
5380 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5381 	if (lpfc_cmd == NULL) {
5382 		lpfc_rampdown_queue_depth(phba);
5383 
5384 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5385 				 "0707 driver's buffer pool is empty, "
5386 				 "IO busied\n");
5387 		goto out_host_busy;
5388 	}
5389 	lpfc_cmd->rx_cmd_start = start;
5390 
5391 	cur_iocbq = &lpfc_cmd->cur_iocbq;
5392 	/*
5393 	 * Store the midlayer's command structure for the completion phase
5394 	 * and complete the command initialization.
5395 	 */
5396 	lpfc_cmd->pCmd  = cmnd;
5397 	lpfc_cmd->rdata = rdata;
5398 	lpfc_cmd->ndlp = ndlp;
5399 	cur_iocbq->cmd_cmpl = NULL;
5400 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5401 
5402 	err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5403 	if (err)
5404 		goto out_host_busy_release_buf;
5405 
5406 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5407 		if (vport->phba->cfg_enable_bg) {
5408 			lpfc_printf_vlog(vport,
5409 					 KERN_INFO, LOG_SCSI_CMD,
5410 					 "9033 BLKGRD: rcvd %s cmd:x%x "
5411 					 "reftag x%x cnt %u pt %x\n",
5412 					 dif_op_str[scsi_get_prot_op(cmnd)],
5413 					 cmnd->cmnd[0],
5414 					 scsi_prot_ref_tag(cmnd),
5415 					 scsi_logical_block_count(cmnd),
5416 					 (cmnd->cmnd[1]>>5));
5417 		}
5418 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5419 	} else {
5420 		if (vport->phba->cfg_enable_bg) {
5421 			lpfc_printf_vlog(vport,
5422 					 KERN_INFO, LOG_SCSI_CMD,
5423 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5424 					 "x%x reftag x%x cnt %u pt %x\n",
5425 					 cmnd->cmnd[0],
5426 					 scsi_prot_ref_tag(cmnd),
5427 					 scsi_logical_block_count(cmnd),
5428 					 (cmnd->cmnd[1]>>5));
5429 		}
5430 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5431 	}
5432 
5433 	if (unlikely(err)) {
5434 		if (err == 2) {
5435 			cmnd->result = DID_ERROR << 16;
5436 			goto out_fail_command_release_buf;
5437 		}
5438 		goto out_host_busy_free_buf;
5439 	}
5440 
5441 	/* check the necessary and sufficient condition to support VMID */
5442 	if (lpfc_is_vmid_enabled(phba) &&
5443 	    (ndlp->vmid_support ||
5444 	     phba->pport->vmid_priority_tagging ==
5445 	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5446 		/* is the I/O generated by a VM, get the associated virtual */
5447 		/* entity id */
5448 		uuid = lpfc_is_command_vm_io(cmnd);
5449 
5450 		if (uuid) {
5451 			err = lpfc_vmid_get_appid(vport, uuid,
5452 					cmnd->sc_data_direction,
5453 					(union lpfc_vmid_io_tag *)
5454 						&cur_iocbq->vmid_tag);
5455 			if (!err)
5456 				cur_iocbq->cmd_flag |= LPFC_IO_VMID;
5457 		}
5458 	}
5459 
5460 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5461 	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5462 		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5463 #endif
5464 	/* Issue I/O to adapter */
5465 	err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5466 				    SLI_IOCB_RET_IOCB);
5467 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5468 	if (start) {
5469 		lpfc_cmd->ts_cmd_start = start;
5470 		lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5471 		lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5472 	} else {
5473 		lpfc_cmd->ts_cmd_start = 0;
5474 	}
5475 #endif
5476 	if (err) {
5477 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5478 				 "3376 FCP could not issue iocb err %x "
5479 				 "FCP cmd x%x <%d/%llu> "
5480 				 "sid: x%x did: x%x oxid: x%x "
5481 				 "Data: x%x x%x x%x x%x\n",
5482 				 err, cmnd->cmnd[0],
5483 				 cmnd->device ? cmnd->device->id : 0xffff,
5484 				 cmnd->device ? cmnd->device->lun : (u64)-1,
5485 				 vport->fc_myDID, ndlp->nlp_DID,
5486 				 phba->sli_rev == LPFC_SLI_REV4 ?
5487 				 cur_iocbq->sli4_xritag : 0xffff,
5488 				 phba->sli_rev == LPFC_SLI_REV4 ?
5489 				 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5490 				 cur_iocbq->iocb.ulpContext,
5491 				 cur_iocbq->iotag,
5492 				 phba->sli_rev == LPFC_SLI_REV4 ?
5493 				 bf_get(wqe_tmo,
5494 					&cur_iocbq->wqe.generic.wqe_com) :
5495 				 cur_iocbq->iocb.ulpTimeout,
5496 				 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
5497 
5498 		goto out_host_busy_free_buf;
5499 	}
5500 
5501 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5502 		lpfc_sli_handle_fast_ring_event(phba,
5503 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5504 
5505 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5506 			lpfc_poll_rearm_timer(phba);
5507 	}
5508 
5509 	if (phba->cfg_xri_rebalancing)
5510 		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5511 
5512 	return 0;
5513 
5514  out_host_busy_free_buf:
5515 	idx = lpfc_cmd->hdwq_no;
5516 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5517 	if (phba->sli4_hba.hdwq) {
5518 		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5519 		case WRITE_DATA:
5520 			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5521 			break;
5522 		case READ_DATA:
5523 			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5524 			break;
5525 		default:
5526 			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5527 		}
5528 	}
5529  out_host_busy_release_buf:
5530 	lpfc_release_scsi_buf(phba, lpfc_cmd);
5531  out_host_busy:
5532 	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5533 			     shost);
5534 	return SCSI_MLQUEUE_HOST_BUSY;
5535 
5536  out_tgt_busy2:
5537 	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5538 			     shost);
5539  out_tgt_busy1:
5540 	return SCSI_MLQUEUE_TARGET_BUSY;
5541 
5542  out_fail_command_release_buf:
5543 	lpfc_release_scsi_buf(phba, lpfc_cmd);
5544 	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5545 			     shost);
5546 
5547  out_fail_command:
5548 	scsi_done(cmnd);
5549 	return 0;
5550 }
5551 
5552 /*
5553  * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5554  * @vport: The virtual port for which this call is being executed.
5555  */
5556 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5557 {
5558 	u32 bucket;
5559 	struct lpfc_vmid *cur;
5560 
5561 	if (vport->port_type == LPFC_PHYSICAL_PORT)
5562 		del_timer_sync(&vport->phba->inactive_vmid_poll);
5563 
5564 	kfree(vport->qfpa_res);
5565 	kfree(vport->vmid_priority.vmid_range);
5566 	kfree(vport->vmid);
5567 
5568 	if (!hash_empty(vport->hash_table))
5569 		hash_for_each(vport->hash_table, bucket, cur, hnode)
5570 			hash_del(&cur->hnode);
5571 
5572 	vport->qfpa_res = NULL;
5573 	vport->vmid_priority.vmid_range = NULL;
5574 	vport->vmid = NULL;
5575 	vport->cur_vmid_cnt = 0;
5576 }
5577 
5578 /**
5579  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5580  * @cmnd: Pointer to scsi_cmnd data structure.
5581  *
5582  * This routine aborts a @cmnd pending in the base driver.
5583  *
5584  * Return code :
5585  *   0x2003 - Error
5586  *   0x2002 - Success
5587  **/
5588 static int
5589 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5590 {
5591 	struct Scsi_Host  *shost = cmnd->device->host;
5592 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5593 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5594 	struct lpfc_hba   *phba = vport->phba;
5595 	struct lpfc_iocbq *iocb;
5596 	struct lpfc_io_buf *lpfc_cmd;
5597 	int ret = SUCCESS, status = 0;
5598 	struct lpfc_sli_ring *pring_s4 = NULL;
5599 	struct lpfc_sli_ring *pring = NULL;
5600 	int ret_val;
5601 	unsigned long flags;
5602 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5603 
5604 	status = fc_block_rport(rport);
5605 	if (status != 0 && status != SUCCESS)
5606 		return status;
5607 
5608 	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5609 	if (!lpfc_cmd)
5610 		return ret;
5611 
5612 	/* Guard against IO completion being called at same time */
5613 	spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
5614 
5615 	spin_lock(&phba->hbalock);
5616 	/* driver queued commands are in process of being flushed */
5617 	if (phba->hba_flag & HBA_IOQ_FLUSH) {
5618 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5619 			"3168 SCSI Layer abort requested I/O has been "
5620 			"flushed by LLD.\n");
5621 		ret = FAILED;
5622 		goto out_unlock_hba;
5623 	}
5624 
5625 	if (!lpfc_cmd->pCmd) {
5626 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5627 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5628 			 "x%x ID %d LUN %llu\n",
5629 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
5630 		goto out_unlock_hba;
5631 	}
5632 
5633 	iocb = &lpfc_cmd->cur_iocbq;
5634 	if (phba->sli_rev == LPFC_SLI_REV4) {
5635 		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5636 		if (!pring_s4) {
5637 			ret = FAILED;
5638 			goto out_unlock_hba;
5639 		}
5640 		spin_lock(&pring_s4->ring_lock);
5641 	}
5642 	/* the command is in process of being cancelled */
5643 	if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
5644 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5645 			"3169 SCSI Layer abort requested I/O has been "
5646 			"cancelled by LLD.\n");
5647 		ret = FAILED;
5648 		goto out_unlock_ring;
5649 	}
5650 	/*
5651 	 * If pCmd field of the corresponding lpfc_io_buf structure
5652 	 * points to a different SCSI command, then the driver has
5653 	 * already completed this command, but the midlayer did not
5654 	 * see the completion before the eh fired. Just return SUCCESS.
5655 	 */
5656 	if (lpfc_cmd->pCmd != cmnd) {
5657 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5658 			"3170 SCSI Layer abort requested I/O has been "
5659 			"completed by LLD.\n");
5660 		goto out_unlock_ring;
5661 	}
5662 
5663 	WARN_ON(iocb->io_buf != lpfc_cmd);
5664 
5665 	/* abort issued in recovery is still in progress */
5666 	if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
5667 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5668 			 "3389 SCSI Layer I/O Abort Request is pending\n");
5669 		if (phba->sli_rev == LPFC_SLI_REV4)
5670 			spin_unlock(&pring_s4->ring_lock);
5671 		spin_unlock(&phba->hbalock);
5672 		spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5673 		goto wait_for_cmpl;
5674 	}
5675 
5676 	lpfc_cmd->waitq = &waitq;
5677 	if (phba->sli_rev == LPFC_SLI_REV4) {
5678 		spin_unlock(&pring_s4->ring_lock);
5679 		ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5680 						      lpfc_sli_abort_fcp_cmpl);
5681 	} else {
5682 		pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5683 		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5684 						     lpfc_sli_abort_fcp_cmpl);
5685 	}
5686 
5687 	/* Make sure HBA is alive */
5688 	lpfc_issue_hb_tmo(phba);
5689 
5690 	if (ret_val != IOCB_SUCCESS) {
5691 		/* Indicate the IO is not being aborted by the driver. */
5692 		lpfc_cmd->waitq = NULL;
5693 		ret = FAILED;
5694 		goto out_unlock_hba;
5695 	}
5696 
5697 	/* no longer need the lock after this point */
5698 	spin_unlock(&phba->hbalock);
5699 	spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5700 
5701 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5702 		lpfc_sli_handle_fast_ring_event(phba,
5703 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5704 
5705 wait_for_cmpl:
5706 	/*
5707 	 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
5708 	 * for abort to complete.
5709 	 */
5710 	wait_event_timeout(waitq,
5711 			  (lpfc_cmd->pCmd != cmnd),
5712 			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5713 
5714 	spin_lock(&lpfc_cmd->buf_lock);
5715 
5716 	if (lpfc_cmd->pCmd == cmnd) {
5717 		ret = FAILED;
5718 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5719 				 "0748 abort handler timed out waiting "
5720 				 "for aborting I/O (xri:x%x) to complete: "
5721 				 "ret %#x, ID %d, LUN %llu\n",
5722 				 iocb->sli4_xritag, ret,
5723 				 cmnd->device->id, cmnd->device->lun);
5724 	}
5725 
5726 	lpfc_cmd->waitq = NULL;
5727 
5728 	spin_unlock(&lpfc_cmd->buf_lock);
5729 	goto out;
5730 
5731 out_unlock_ring:
5732 	if (phba->sli_rev == LPFC_SLI_REV4)
5733 		spin_unlock(&pring_s4->ring_lock);
5734 out_unlock_hba:
5735 	spin_unlock(&phba->hbalock);
5736 	spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5737 out:
5738 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5739 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5740 			 "LUN %llu\n", ret, cmnd->device->id,
5741 			 cmnd->device->lun);
5742 	return ret;
5743 }
5744 
5745 static char *
5746 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5747 {
5748 	switch (task_mgmt_cmd) {
5749 	case FCP_ABORT_TASK_SET:
5750 		return "ABORT_TASK_SET";
5751 	case FCP_CLEAR_TASK_SET:
5752 		return "FCP_CLEAR_TASK_SET";
5753 	case FCP_BUS_RESET:
5754 		return "FCP_BUS_RESET";
5755 	case FCP_LUN_RESET:
5756 		return "FCP_LUN_RESET";
5757 	case FCP_TARGET_RESET:
5758 		return "FCP_TARGET_RESET";
5759 	case FCP_CLEAR_ACA:
5760 		return "FCP_CLEAR_ACA";
5761 	case FCP_TERMINATE_TASK:
5762 		return "FCP_TERMINATE_TASK";
5763 	default:
5764 		return "unknown";
5765 	}
5766 }
5767 
5768 
5769 /**
5770  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
5771  * @vport: The virtual port for which this call is being executed.
5772  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5773  *
5774  * This routine checks the FCP_RSP_INFO to see if the task mgmt command succeeded.
5775  *
5776  * Return code :
5777  *   0x2003 - Error
5778  *   0x2002 - Success
5779  **/
5780 static int
5781 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5782 {
5783 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5784 	uint32_t rsp_info;
5785 	uint32_t rsp_len;
5786 	uint8_t  rsp_info_code;
5787 	int ret = FAILED;
5788 
5789 
5790 	if (fcprsp == NULL)
5791 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5792 				 "0703 fcp_rsp is missing\n");
5793 	else {
5794 		rsp_info = fcprsp->rspStatus2;
5795 		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5796 		rsp_info_code = fcprsp->rspInfo3;
5797 
5798 
5799 		lpfc_printf_vlog(vport, KERN_INFO,
5800 				 LOG_FCP,
5801 				 "0706 fcp_rsp valid 0x%x,"
5802 				 " rsp len=%d code 0x%x\n",
5803 				 rsp_info,
5804 				 rsp_len, rsp_info_code);
5805 
5806 		/* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5807 		 * field specifies the number of valid bytes of FCP_RSP_INFO.
5808 		 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5809 		 */
5810 		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5811 		    ((rsp_len == 8) || (rsp_len == 4))) {
5812 			switch (rsp_info_code) {
5813 			case RSP_NO_FAILURE:
5814 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5815 						 "0715 Task Mgmt No Failure\n");
5816 				ret = SUCCESS;
5817 				break;
5818 			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5819 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5820 						 "0716 Task Mgmt Target "
5821 						"reject\n");
5822 				break;
5823 			case RSP_TM_NOT_COMPLETED: /* TM failed */
5824 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5825 						 "0717 Task Mgmt Target "
5826 						"failed TM\n");
5827 				break;
5828 			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5829 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5830 						 "0718 Task Mgmt to invalid "
5831 						"LUN\n");
5832 				break;
5833 			}
5834 		}
5835 	}
5836 	return ret;
5837 }
5838 
5839 
5840 /**
5841  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5842  * @vport: The virtual port for which this call is being executed.
5843  * @rport: Pointer to remote port
5844  * @tgt_id: Target ID of remote device.
5845  * @lun_id: Lun number for the TMF
5846  * @task_mgmt_cmd: type of TMF to send
5847  *
5848  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5849  * a remote port.
5850  *
5851  * Return Code:
5852  *   0x2003 - Error
5853  *   0x2002 - Success.
5854  **/
5855 static int
5856 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
5857 		   unsigned int tgt_id, uint64_t lun_id,
5858 		   uint8_t task_mgmt_cmd)
5859 {
5860 	struct lpfc_hba   *phba = vport->phba;
5861 	struct lpfc_io_buf *lpfc_cmd;
5862 	struct lpfc_iocbq *iocbq;
5863 	struct lpfc_iocbq *iocbqrsp;
5864 	struct lpfc_rport_data *rdata;
5865 	struct lpfc_nodelist *pnode;
5866 	int ret;
5867 	int status;
5868 
5869 	rdata = rport->dd_data;
5870 	if (!rdata || !rdata->pnode)
5871 		return FAILED;
5872 	pnode = rdata->pnode;
5873 
5874 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5875 	if (lpfc_cmd == NULL)
5876 		return FAILED;
5877 	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5878 	lpfc_cmd->rdata = rdata;
5879 	lpfc_cmd->pCmd = NULL;
5880 	lpfc_cmd->ndlp = pnode;
5881 
5882 	status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5883 						    task_mgmt_cmd);
5884 	if (!status) {
5885 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5886 		return FAILED;
5887 	}
5888 
5889 	iocbq = &lpfc_cmd->cur_iocbq;
5890 	iocbqrsp = lpfc_sli_get_iocbq(phba);
5891 	if (iocbqrsp == NULL) {
5892 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5893 		return FAILED;
5894 	}
5895 	iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
5896 	iocbq->vport = vport;
5897 
5898 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5899 			 "0702 Issue %s to TGT %d LUN %llu "
5900 			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5901 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5902 			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5903 			 iocbq->cmd_flag);
5904 
5905 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5906 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
5907 	if ((status != IOCB_SUCCESS) ||
5908 	    (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
5909 		if (status != IOCB_SUCCESS ||
5910 		    get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5911 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5912 					 "0727 TMF %s to TGT %d LUN %llu "
5913 					 "failed (%d, %d) cmd_flag x%x\n",
5914 					 lpfc_taskmgmt_name(task_mgmt_cmd),
5915 					 tgt_id, lun_id,
5916 					 get_job_ulpstatus(phba, iocbqrsp),
5917 					 get_job_word4(phba, iocbqrsp),
5918 					 iocbq->cmd_flag);
5919 		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5920 		if (status == IOCB_SUCCESS) {
5921 			if (get_job_ulpstatus(phba, iocbqrsp) ==
5922 			    IOSTAT_FCP_RSP_ERROR)
5923 				/* Something in the FCP_RSP was invalid.
5924 				 * Check conditions */
5925 				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5926 			else
5927 				ret = FAILED;
5928 		} else if ((status == IOCB_TIMEDOUT) ||
5929 			   (status == IOCB_ABORTED)) {
5930 			ret = TIMEOUT_ERROR;
5931 		} else {
5932 			ret = FAILED;
5933 		}
5934 	} else
5935 		ret = SUCCESS;
5936 
5937 	lpfc_sli_release_iocbq(phba, iocbqrsp);
5938 
5939 	if (status != IOCB_TIMEDOUT)
5940 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5941 
5942 	return ret;
5943 }
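
/*
 * Illustrative usage (see lpfc_device_reset_handler() below): a LUN reset is
 * issued through lpfc_send_taskmgmt() and, on success, any orphaned I/O for
 * that LUN is flushed:
 *
 *	status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
 *				    FCP_LUN_RESET);
 *	if (status == SUCCESS)
 *		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
 *						     LPFC_CTX_LUN);
 */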
5944 
5945 /**
5946  * lpfc_chk_tgt_mapped - Wait for the scsi target to enter a MAPPED state
5947  * @vport: The virtual port to check on
5948  * @rport: Pointer to fc_rport data structure.
5949  *
5950  * This routine delays until the scsi target (aka rport) for the
5951  * command exists (is present and logged in) or we declare it non-existent.
5952  *
5953  * Return code :
5954  *  0x2003 - Error
5955  *  0x2002 - Success
5956  **/
5957 static int
5958 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
5959 {
5960 	struct lpfc_rport_data *rdata;
5961 	struct lpfc_nodelist *pnode = NULL;
5962 	unsigned long later;
5963 
5964 	rdata = rport->dd_data;
5965 	if (!rdata) {
5966 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5967 			"0797 Tgt Map rport failure: rdata x%px\n", rdata);
5968 		return FAILED;
5969 	}
5970 	pnode = rdata->pnode;
5971 
5972 	/*
5973 	 * If target is not in a MAPPED state, delay until
5974 	 * target is rediscovered or devloss timeout expires.
5975 	 */
5976 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5977 	while (time_after(later, jiffies)) {
5978 		if (!pnode)
5979 			return FAILED;
5980 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5981 			return SUCCESS;
5982 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5983 		rdata = rport->dd_data;
5984 		if (!rdata)
5985 			return FAILED;
5986 		pnode = rdata->pnode;
5987 	}
5988 	if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5989 		return FAILED;
5990 	return SUCCESS;
5991 }
5992 
5993 /**
5994  * lpfc_reset_flush_io_context - Flush orphaned I/O contexts after a reset
5995  * @vport: The virtual port (scsi_host) for the flush context
5996  * @tgt_id: If aborting by Target context - specifies the target id
5997  * @lun_id: If aborting by Lun context - specifies the lun id
5998  * @context: specifies the context level to flush at.
5999  *
6000  * After a reset condition via TMF, we need to flush orphaned i/o
6001  * contexts from the adapter. This routine aborts any outstanding
6002  * contexts, then waits for their completions. The wait is
6003  * bounded by devloss_tmo, however.
6004  *
6005  * Return code :
6006  *  0x2003 - Error
6007  *  0x2002 - Success
6008  **/
6009 static int
6010 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6011 			uint64_t lun_id, lpfc_ctx_cmd context)
6012 {
6013 	struct lpfc_hba   *phba = vport->phba;
6014 	unsigned long later;
6015 	int cnt;
6016 
6017 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6018 	if (cnt)
6019 		lpfc_sli_abort_taskmgmt(vport,
6020 					&phba->sli.sli3_ring[LPFC_FCP_RING],
6021 					tgt_id, lun_id, context);
6022 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6023 	while (time_after(later, jiffies) && cnt) {
6024 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6025 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6026 	}
6027 	if (cnt) {
6028 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6029 			"0724 I/O flush failure for context %s : cnt x%x\n",
6030 			((context == LPFC_CTX_LUN) ? "LUN" :
6031 			 ((context == LPFC_CTX_TGT) ? "TGT" :
6032 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6033 			cnt);
6034 		return FAILED;
6035 	}
6036 	return SUCCESS;
6037 }
6038 
6039 /**
6040  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
6041  * @cmnd: Pointer to scsi_cmnd data structure.
6042  *
6043  * This routine does a device reset by sending a LUN_RESET task management
6044  * command.
6045  *
6046  * Return code :
6047  *  0x2003 - Error
6048  *  0x2002 - Success
6049  **/
6050 static int
6051 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
6052 {
6053 	struct Scsi_Host  *shost = cmnd->device->host;
6054 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6055 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6056 	struct lpfc_rport_data *rdata;
6057 	struct lpfc_nodelist *pnode;
6058 	unsigned tgt_id = cmnd->device->id;
6059 	uint64_t lun_id = cmnd->device->lun;
6060 	struct lpfc_scsi_event_header scsi_event;
6061 	int status;
6062 	u32 logit = LOG_FCP;
6063 
6064 	if (!rport)
6065 		return FAILED;
6066 
6067 	rdata = rport->dd_data;
6068 	if (!rdata || !rdata->pnode) {
6069 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6070 				 "0798 Device Reset rdata failure: rdata x%px\n",
6071 				 rdata);
6072 		return FAILED;
6073 	}
6074 	pnode = rdata->pnode;
6075 	status = fc_block_rport(rport);
6076 	if (status != 0 && status != SUCCESS)
6077 		return status;
6078 
6079 	status = lpfc_chk_tgt_mapped(vport, rport);
6080 	if (status == FAILED) {
6081 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6082 			"0721 Device Reset rport failure: rdata x%px\n", rdata);
6083 		return FAILED;
6084 	}
6085 
6086 	scsi_event.event_type = FC_REG_SCSI_EVENT;
6087 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6088 	scsi_event.lun = lun_id;
6089 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6090 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6091 
6092 	fc_host_post_vendor_event(shost, fc_get_event_number(),
6093 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6094 
6095 	status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6096 						FCP_LUN_RESET);
6097 	if (status != SUCCESS)
6098 		logit =  LOG_TRACE_EVENT;
6099 
6100 	lpfc_printf_vlog(vport, KERN_ERR, logit,
6101 			 "0713 SCSI layer issued Device Reset (%d, %llu) "
6102 			 "return x%x\n", tgt_id, lun_id, status);
6103 
6104 	/*
6105 	 * We have to clean up the i/o: it may be orphaned by the TMF,
6106 	 * or, if the TMF failed, it may be in an indeterminate state.
6107 	 * So, continue on.
6108 	 * We will report success if all of the i/o aborts successfully.
6109 	 */
6110 	if (status == SUCCESS)
6111 		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6112 						LPFC_CTX_LUN);
6113 
6114 	return status;
6115 }
6116 
6117 /**
6118  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
6119  * @cmnd: Pointer to scsi_cmnd data structure.
6120  *
6121  * This routine does a target reset by sending a TARGET_RESET task management
6122  * command.
6123  *
6124  * Return code :
6125  *  0x2003 - Error
6126  *  0x2002 - Success
6127  **/
6128 static int
6129 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6130 {
6131 	struct Scsi_Host  *shost = cmnd->device->host;
6132 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6133 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6134 	struct lpfc_rport_data *rdata;
6135 	struct lpfc_nodelist *pnode;
6136 	unsigned tgt_id = cmnd->device->id;
6137 	uint64_t lun_id = cmnd->device->lun;
6138 	struct lpfc_scsi_event_header scsi_event;
6139 	int status;
6140 	u32 logit = LOG_FCP;
6141 	u32 dev_loss_tmo = vport->cfg_devloss_tmo;
6142 	unsigned long flags;
6143 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6144 
6145 	if (!rport)
6146 		return FAILED;
6147 
6148 	rdata = rport->dd_data;
6149 	if (!rdata || !rdata->pnode) {
6150 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6151 				 "0799 Target Reset rdata failure: rdata x%px\n",
6152 				 rdata);
6153 		return FAILED;
6154 	}
6155 	pnode = rdata->pnode;
6156 	status = fc_block_rport(rport);
6157 	if (status != 0 && status != SUCCESS)
6158 		return status;
6159 
6160 	status = lpfc_chk_tgt_mapped(vport, rport);
6161 	if (status == FAILED) {
6162 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6163 			"0722 Target Reset rport failure: rdata x%px\n", rdata);
6164 		if (pnode) {
6165 			spin_lock_irqsave(&pnode->lock, flags);
6166 			pnode->nlp_flag &= ~NLP_NPR_ADISC;
6167 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6168 			spin_unlock_irqrestore(&pnode->lock, flags);
6169 		}
6170 		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6171 					  LPFC_CTX_TGT);
6172 		return FAST_IO_FAIL;
6173 	}
6174 
6175 	scsi_event.event_type = FC_REG_SCSI_EVENT;
6176 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6177 	scsi_event.lun = 0;
6178 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6179 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6180 
6181 	fc_host_post_vendor_event(shost, fc_get_event_number(),
6182 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6183 
6184 	status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6185 					FCP_TARGET_RESET);
6186 	if (status != SUCCESS) {
6187 		logit = LOG_TRACE_EVENT;
6188 
6189 		/* Issue LOGO, if no LOGO is outstanding */
6190 		spin_lock_irqsave(&pnode->lock, flags);
6191 		if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
6192 		    !pnode->logo_waitq) {
6193 			pnode->logo_waitq = &waitq;
6194 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6195 			pnode->nlp_flag |= NLP_ISSUE_LOGO;
6196 			pnode->save_flags |= NLP_WAIT_FOR_LOGO;
6197 			spin_unlock_irqrestore(&pnode->lock, flags);
6198 			lpfc_unreg_rpi(vport, pnode);
6199 			wait_event_timeout(waitq,
6200 					   (!(pnode->save_flags &
6201 					      NLP_WAIT_FOR_LOGO)),
6202 					   msecs_to_jiffies(dev_loss_tmo *
6203 							    1000));
6204 
6205 			if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
6206 				lpfc_printf_vlog(vport, KERN_ERR, logit,
6207 						 "0725 SCSI layer TGTRST "
6208 						 "failed & LOGO TMO (%d, %llu) "
6209 						 "return x%x\n",
6210 						 tgt_id, lun_id, status);
6211 				spin_lock_irqsave(&pnode->lock, flags);
6212 				pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
6213 			} else {
6214 				spin_lock_irqsave(&pnode->lock, flags);
6215 			}
6216 			pnode->logo_waitq = NULL;
6217 			spin_unlock_irqrestore(&pnode->lock, flags);
6218 			status = SUCCESS;
6219 
6220 		} else {
6221 			spin_unlock_irqrestore(&pnode->lock, flags);
6222 			status = FAILED;
6223 		}
6224 	}
6225 
6226 	lpfc_printf_vlog(vport, KERN_ERR, logit,
6227 			 "0723 SCSI layer issued Target Reset (%d, %llu) "
6228 			 "return x%x\n", tgt_id, lun_id, status);
6229 
6230 	/*
6231 	 * We have to clean up the i/o: it may be orphaned by the TMF,
6232 	 * or, if the TMF failed, it may be in an indeterminate state.
6233 	 * So, continue on.
6234 	 * We will report success if all of the i/o aborts successfully.
6235 	 */
6236 	if (status == SUCCESS)
6237 		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6238 					  LPFC_CTX_TGT);
6239 	return status;
6240 }
6241 
6242 /**
6243  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6244  * @cmnd: Pointer to scsi_cmnd data structure.
6245  *
6246  * This routine does a host reset of the adapter port. It brings the HBA
6247  * offline, performs a board restart, and then brings the board back online.
6248  * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
6249  * all outstanding SCSI commands to the host, with the errors returned
6250  * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
6251  * for error handling, it only returns an error if resetting the adapter
6252  * is not successful; in all other cases it returns success.
6253  *
6254  * Return code :
6255  *  0x2003 - Error
6256  *  0x2002 - Success
6257  **/
6258 static int
6259 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6260 {
6261 	struct Scsi_Host *shost = cmnd->device->host;
6262 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6263 	struct lpfc_hba *phba = vport->phba;
6264 	int rc, ret = SUCCESS;
6265 
6266 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6267 			 "3172 SCSI layer issued Host Reset Data:\n");
6268 
6269 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6270 	lpfc_offline(phba);
6271 	rc = lpfc_sli_brdrestart(phba);
6272 	if (rc)
6273 		goto error;
6274 
6275 	/* Wait for successful restart of adapter */
6276 	if (phba->sli_rev < LPFC_SLI_REV4) {
6277 		rc = lpfc_sli_chipset_init(phba);
6278 		if (rc)
6279 			goto error;
6280 	}
6281 
6282 	rc = lpfc_online(phba);
6283 	if (rc)
6284 		goto error;
6285 
6286 	lpfc_unblock_mgmt_io(phba);
6287 
6288 	return ret;
6289 error:
6290 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6291 			 "3323 Failed host reset\n");
6292 	lpfc_unblock_mgmt_io(phba);
6293 	return FAILED;
6294 }
6295 
6296 /**
6297  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6298  * @sdev: Pointer to scsi_device.
6299  *
6300  * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
6301  * globally available list of scsi buffers. It also makes sure no more scsi
6302  * buffers are allocated than the HBA limit conveyed to the midlayer. This list
6303  * of scsi buffers exists for the lifetime of the driver.
6304  *
6305  * Return codes:
6306  *   non-0 - Error
6307  *   0 - Success
6308  **/
6309 static int
6310 lpfc_slave_alloc(struct scsi_device *sdev)
6311 {
6312 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6313 	struct lpfc_hba   *phba = vport->phba;
6314 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6315 	uint32_t total = 0;
6316 	uint32_t num_to_alloc = 0;
6317 	int num_allocated = 0;
6318 	uint32_t sdev_cnt;
6319 	struct lpfc_device_data *device_data;
6320 	unsigned long flags;
6321 	struct lpfc_name target_wwpn;
6322 
6323 	if (!rport || fc_remote_port_chkready(rport))
6324 		return -ENXIO;
6325 
6326 	if (phba->cfg_fof) {
6327 
6328 		/*
6329 		 * Check to see if the device data structure for the lun
6330 		 * exists.  If not, create one.
6331 		 */
6332 
6333 		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6334 		spin_lock_irqsave(&phba->devicelock, flags);
6335 		device_data = __lpfc_get_device_data(phba,
6336 						     &phba->luns,
6337 						     &vport->fc_portname,
6338 						     &target_wwpn,
6339 						     sdev->lun);
6340 		if (!device_data) {
6341 			spin_unlock_irqrestore(&phba->devicelock, flags);
6342 			device_data = lpfc_create_device_data(phba,
6343 							&vport->fc_portname,
6344 							&target_wwpn,
6345 							sdev->lun,
6346 							phba->cfg_XLanePriority,
6347 							true);
6348 			if (!device_data)
6349 				return -ENOMEM;
6350 			spin_lock_irqsave(&phba->devicelock, flags);
6351 			list_add_tail(&device_data->listentry, &phba->luns);
6352 		}
6353 		device_data->rport_data = rport->dd_data;
6354 		device_data->available = true;
6355 		spin_unlock_irqrestore(&phba->devicelock, flags);
6356 		sdev->hostdata = device_data;
6357 	} else {
6358 		sdev->hostdata = rport->dd_data;
6359 	}
6360 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6361 
6362 	/* For SLI4, all IO buffers are pre-allocated */
6363 	if (phba->sli_rev == LPFC_SLI_REV4)
6364 		return 0;
6365 
6366 	/* This code path is now ONLY for SLI3 adapters */
6367 
6368 	/*
6369 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
6370 	 * available list of scsi buffers.  Don't allocate more than the
6371 	 * HBA limit conveyed to the midlayer via the host structure.  The
6372 	 * formula accounts for the lun_queue_depth + error handlers + 1
6373 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
6374 	 */
6375 	total = phba->total_scsi_bufs;
6376 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
6377 
6378 	/* If allocated buffers are enough do nothing */
6379 	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6380 		return 0;
6381 
6382 	/* Allow some exchanges to be available always to complete discovery */
6383 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6384 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6385 				 "0704 At limitation of %d preallocated "
6386 				 "command buffers\n", total);
6387 		return 0;
6388 	/* Allow some exchanges to be available always to complete discovery */
6389 	} else if (total + num_to_alloc >
6390 		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6391 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6392 				 "0705 Allocation request of %d "
6393 				 "command buffers will exceed max of %d.  "
6394 				 "Reducing allocation request to %d.\n",
6395 				 num_to_alloc, phba->cfg_hba_queue_depth,
6396 				 (phba->cfg_hba_queue_depth - total));
6397 		num_to_alloc = phba->cfg_hba_queue_depth - total;
6398 	}
6399 	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6400 	if (num_to_alloc != num_allocated) {
6401 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6402 					 "0708 Allocation request of %d "
6403 					 "command buffers did not succeed.  "
6404 					 "Allocated %d buffers.\n",
6405 					 num_to_alloc, num_allocated);
6406 	}
6407 	if (num_allocated > 0)
6408 		phba->total_scsi_bufs += num_allocated;
6409 	return 0;
6410 }
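
/*
 * Worked example with hypothetical numbers: with cfg_lun_queue_depth = 30,
 * each SLI3 scsi_device asks for num_to_alloc = 30 + 2 = 32 buffers.  If the
 * fourth device is being added (sdev_cnt = 4) and total = 100 buffers already
 * exist, then 4 * 32 = 128 is not below 100, so another batch of up to 32
 * buffers is requested, reduced if necessary so the running total does not
 * exceed cfg_hba_queue_depth.
 */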
6411 
6412 /**
6413  * lpfc_slave_configure - scsi_host_template slave_configure entry point
6414  * @sdev: Pointer to scsi_device.
6415  *
6416  * This routine configures the following items:
6417  *   - Tag command queuing support for @sdev if supported.
6418  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6419  *
6420  * Return codes:
6421  *   0 - Success
6422  **/
6423 static int
6424 lpfc_slave_configure(struct scsi_device *sdev)
6425 {
6426 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6427 	struct lpfc_hba   *phba = vport->phba;
6428 
6429 	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6430 
6431 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6432 		lpfc_sli_handle_fast_ring_event(phba,
6433 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6434 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6435 			lpfc_poll_rearm_timer(phba);
6436 	}
6437 
6438 	return 0;
6439 }
6440 
6441 /**
6442  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6443  * @sdev: Pointer to scsi_device.
6444  *
6445  * This routine sets the @sdev hostdata field to null.
6446  **/
6447 static void
6448 lpfc_slave_destroy(struct scsi_device *sdev)
6449 {
6450 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6451 	struct lpfc_hba   *phba = vport->phba;
6452 	unsigned long flags;
6453 	struct lpfc_device_data *device_data = sdev->hostdata;
6454 
6455 	atomic_dec(&phba->sdev_cnt);
6456 	if ((phba->cfg_fof) && (device_data)) {
6457 		spin_lock_irqsave(&phba->devicelock, flags);
6458 		device_data->available = false;
6459 		if (!device_data->oas_enabled)
6460 			lpfc_delete_device_data(phba, device_data);
6461 		spin_unlock_irqrestore(&phba->devicelock, flags);
6462 	}
6463 	sdev->hostdata = NULL;
6464 	return;
6465 }
6466 
6467 /**
6468  * lpfc_create_device_data - creates and initializes device data structure for OAS
6469  * @phba: Pointer to host bus adapter structure.
6470  * @vport_wwpn: Pointer to vport's wwpn information
6471  * @target_wwpn: Pointer to target's wwpn information
6472  * @lun: Lun on target
6473  * @pri: Priority
6474  * @atomic_create: Flag to indicate if memory should be allocated using the
6475  *		  GFP_ATOMIC flag or not.
6476  *
6477  * This routine creates a device data structure which will contain identifying
6478  * information for the device (host wwpn, target wwpn, lun), state of OAS,
6479  * whether or not the corresponding lun is available by the system,
6480  * and pointer to the rport data.
6481  *
6482  * Return codes:
6483  *   NULL - Error
6484  *   Pointer to lpfc_device_data - Success
6485  **/
6486 struct lpfc_device_data*
6487 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6488 			struct lpfc_name *target_wwpn, uint64_t lun,
6489 			uint32_t pri, bool atomic_create)
6490 {
6491 
6492 	struct lpfc_device_data *lun_info;
6493 	int memory_flags;
6494 
6495 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
6496 	    !(phba->cfg_fof))
6497 		return NULL;
6498 
6499 	/* Attempt to create the device data to contain lun info */
6500 
6501 	if (atomic_create)
6502 		memory_flags = GFP_ATOMIC;
6503 	else
6504 		memory_flags = GFP_KERNEL;
6505 	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6506 	if (!lun_info)
6507 		return NULL;
6508 	INIT_LIST_HEAD(&lun_info->listentry);
6509 	lun_info->rport_data  = NULL;
6510 	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6511 	       sizeof(struct lpfc_name));
6512 	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6513 	       sizeof(struct lpfc_name));
6514 	lun_info->device_id.lun = lun;
6515 	lun_info->oas_enabled = false;
6516 	lun_info->priority = pri;
6517 	lun_info->available = false;
6518 	return lun_info;
6519 }
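
/*
 * Illustrative sketch, not part of the driver: a hedged example of how a
 * caller might pair lpfc_create_device_data() with an insertion into the
 * phba->luns list.  The helper name example_oas_add_lun() and the choice of
 * priority 0 are assumptions for illustration only; the real caller in this
 * file is lpfc_enable_oas_lun() below.
 *
 *	static bool example_oas_add_lun(struct lpfc_hba *phba,
 *					struct lpfc_name *vport_wwpn,
 *					struct lpfc_name *target_wwpn,
 *					uint64_t lun)
 *	{
 *		struct lpfc_device_data *lun_info;
 *		unsigned long flags;
 *
 *		lun_info = lpfc_create_device_data(phba, vport_wwpn,
 *						   target_wwpn, lun, 0, false);
 *		if (!lun_info)
 *			return false;
 *
 *		spin_lock_irqsave(&phba->devicelock, flags);
 *		list_add_tail(&lun_info->listentry, &phba->luns);
 *		spin_unlock_irqrestore(&phba->devicelock, flags);
 *		return true;
 *	}
 */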
6520 
6521 /**
6522  * lpfc_delete_device_data - frees a device data structure for OAS
6523  * @phba: Pointer to host bus adapter structure.
6524  * @lun_info: Pointer to device data structure to free.
6525  *
6526  * This routine frees the previously allocated device data structure passed.
6527  *
6528  **/
6529 void
6530 lpfc_delete_device_data(struct lpfc_hba *phba,
6531 			struct lpfc_device_data *lun_info)
6532 {
6533 
6534 	if (unlikely(!phba) || !lun_info  ||
6535 	    !(phba->cfg_fof))
6536 		return;
6537 
6538 	if (!list_empty(&lun_info->listentry))
6539 		list_del(&lun_info->listentry);
6540 	mempool_free(lun_info, phba->device_data_mem_pool);
6541 	return;
6542 }
6543 
6544 /**
6545  * __lpfc_get_device_data - returns the device data for the specified lun
6546  * @phba: Pointer to host bus adapter structure.
6547  * @list: Pointer to the list to search.
6548  * @vport_wwpn: Pointer to vport's wwpn information
6549  * @target_wwpn: Pointer to target's wwpn information
6550  * @lun: Lun on target
6551  *
6552  * This routine searches the list passed for the specified lun's device data.
6553  * This function does not hold locks, it is the responsibility of the caller
6554  * to ensure the proper lock is held before calling the function.
6555  *
6556  * Return codes:
6557  *   NULL - Error
6558  *   Pointer to lpfc_device_data - Success
6559  **/
6560 struct lpfc_device_data*
6561 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6562 		       struct lpfc_name *vport_wwpn,
6563 		       struct lpfc_name *target_wwpn, uint64_t lun)
6564 {
6565 
6566 	struct lpfc_device_data *lun_info;
6567 
6568 	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6569 	    !phba->cfg_fof)
6570 		return NULL;
6571 
6572 	/* Search the list for a matching vport wwpn, target wwpn and lun. */
6573 
6574 	list_for_each_entry(lun_info, list, listentry) {
6575 		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6576 			    sizeof(struct lpfc_name)) == 0) &&
6577 		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6578 			    sizeof(struct lpfc_name)) == 0) &&
6579 		    (lun_info->device_id.lun == lun))
6580 			return lun_info;
6581 	}
6582 
6583 	return NULL;
6584 }
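
/*
 * Illustrative sketch, not part of the driver: __lpfc_get_device_data() takes
 * no locks itself, so a caller is expected to hold phba->devicelock across
 * the lookup, as lpfc_enable_oas_lun() and lpfc_disable_oas_lun() below do.
 * The helper name example_oas_lun_is_enabled() is hypothetical.
 *
 *	static bool example_oas_lun_is_enabled(struct lpfc_hba *phba,
 *					       struct lpfc_name *vport_wwpn,
 *					       struct lpfc_name *target_wwpn,
 *					       uint64_t lun)
 *	{
 *		struct lpfc_device_data *lun_info;
 *		unsigned long flags;
 *		bool enabled = false;
 *
 *		spin_lock_irqsave(&phba->devicelock, flags);
 *		lun_info = __lpfc_get_device_data(phba, &phba->luns,
 *						  vport_wwpn, target_wwpn, lun);
 *		if (lun_info)
 *			enabled = lun_info->oas_enabled;
 *		spin_unlock_irqrestore(&phba->devicelock, flags);
 *		return enabled;
 *	}
 */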
6585 
6586 /**
6587  * lpfc_find_next_oas_lun - searches for the next oas lun
6588  * @phba: Pointer to host bus adapter structure.
6589  * @vport_wwpn: Pointer to vport's wwpn information
6590  * @target_wwpn: Pointer to target's wwpn information
6591  * @starting_lun: Pointer to the lun to start searching for
6592  * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6593  * @found_target_wwpn: Pointer to the found lun's target wwpn information
6594  * @found_lun: Pointer to the found lun.
6595  * @found_lun_status: Pointer to status of the found lun.
6596  * @found_lun_pri: Pointer to priority of the found lun.
6597  *
6598  * This routine searches the luns list for the specified lun
6599  * or the first lun for the vport/target.  If the vport wwpn contains
6600  * a zero value then a specific vport is not specified. In this case
6601  * any vport which contains the lun will be considered a match.  If the
6602  * target wwpn contains a zero value then a specific target is not specified.
6603  * In this case any target which contains the lun will be considered a
6604  * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
6605  * are returned.  The function will also return the next lun if available.
6606  * If the next lun is not found, the starting_lun parameter will be set to
6607  * NO_MORE_OAS_LUN.
6608  *
6609  * Return codes:
6610  *   true - A matching lun was found
6611  *   false - Error or no matching lun was found
6612  **/
6613 bool
6614 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6615 		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6616 		       struct lpfc_name *found_vport_wwpn,
6617 		       struct lpfc_name *found_target_wwpn,
6618 		       uint64_t *found_lun,
6619 		       uint32_t *found_lun_status,
6620 		       uint32_t *found_lun_pri)
6621 {
6622 
6623 	unsigned long flags;
6624 	struct lpfc_device_data *lun_info;
6625 	struct lpfc_device_id *device_id;
6626 	uint64_t lun;
6627 	bool found = false;
6628 
6629 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6630 	    !starting_lun || !found_vport_wwpn ||
6631 	    !found_target_wwpn || !found_lun || !found_lun_status ||
6632 	    (*starting_lun == NO_MORE_OAS_LUN) ||
6633 	    !phba->cfg_fof)
6634 		return false;
6635 
6636 	lun = *starting_lun;
6637 	*found_lun = NO_MORE_OAS_LUN;
6638 	*starting_lun = NO_MORE_OAS_LUN;
6639 
6640 	/* Search for the lun or the lun closest in value */
6641 
6642 	spin_lock_irqsave(&phba->devicelock, flags);
6643 	list_for_each_entry(lun_info, &phba->luns, listentry) {
6644 		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6645 		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6646 			    sizeof(struct lpfc_name)) == 0)) &&
6647 		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6648 		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6649 			    sizeof(struct lpfc_name)) == 0)) &&
6650 		    (lun_info->oas_enabled)) {
6651 			device_id = &lun_info->device_id;
6652 			if ((!found) &&
6653 			    ((lun == FIND_FIRST_OAS_LUN) ||
6654 			     (device_id->lun == lun))) {
6655 				*found_lun = device_id->lun;
6656 				memcpy(found_vport_wwpn,
6657 				       &device_id->vport_wwpn,
6658 				       sizeof(struct lpfc_name));
6659 				memcpy(found_target_wwpn,
6660 				       &device_id->target_wwpn,
6661 				       sizeof(struct lpfc_name));
6662 				if (lun_info->available)
6663 					*found_lun_status =
6664 						OAS_LUN_STATUS_EXISTS;
6665 				else
6666 					*found_lun_status = 0;
6667 				*found_lun_pri = lun_info->priority;
6668 				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6669 					memset(vport_wwpn, 0x0,
6670 					       sizeof(struct lpfc_name));
6671 				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6672 					memset(target_wwpn, 0x0,
6673 					       sizeof(struct lpfc_name));
6674 				found = true;
6675 			} else if (found) {
6676 				*starting_lun = device_id->lun;
6677 				memcpy(vport_wwpn, &device_id->vport_wwpn,
6678 				       sizeof(struct lpfc_name));
6679 				memcpy(target_wwpn, &device_id->target_wwpn,
6680 				       sizeof(struct lpfc_name));
6681 				break;
6682 			}
6683 		}
6684 	}
6685 	spin_unlock_irqrestore(&phba->devicelock, flags);
6686 	return found;
6687 }
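
/*
 * Illustrative sketch, not part of the driver: enumerating OAS-enabled luns
 * with lpfc_find_next_oas_lun().  The walk starts at FIND_FIRST_OAS_LUN and
 * each successful call returns one lun while advancing the starting lun to
 * the next candidate (or to NO_MORE_OAS_LUN when the list is exhausted, at
 * which point the next call returns false).  Zeroed wwpns act as wildcards
 * matching any vport/target.  The helper name example_oas_walk_luns() is
 * hypothetical.
 *
 *	static void example_oas_walk_luns(struct lpfc_hba *phba)
 *	{
 *		struct lpfc_name vport_wwpn, target_wwpn;
 *		struct lpfc_name found_vport_wwpn, found_target_wwpn;
 *		uint64_t lun = FIND_FIRST_OAS_LUN;
 *		uint64_t found_lun;
 *		uint32_t found_lun_status, found_lun_pri;
 *
 *		memset(&vport_wwpn, 0, sizeof(vport_wwpn));
 *		memset(&target_wwpn, 0, sizeof(target_wwpn));
 *
 *		while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
 *					      &lun, &found_vport_wwpn,
 *					      &found_target_wwpn, &found_lun,
 *					      &found_lun_status,
 *					      &found_lun_pri))
 *			pr_info("OAS lun %llu priority %u\n",
 *				(unsigned long long)found_lun, found_lun_pri);
 *	}
 */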
6688 
6689 /**
6690  * lpfc_enable_oas_lun - enables a lun for OAS operations
6691  * @phba: Pointer to host bus adapter structure.
6692  * @vport_wwpn: Pointer to vport's wwpn information
6693  * @target_wwpn: Pointer to target's wwpn information
6694  * @lun: Lun
6695  * @pri: Priority
6696  *
6697  * This routine enables a lun for oas operations.  The routine does so by
6698  * doing the following:
6699  *
6700  *   1) Checks to see if the device data for the lun has been created.
6701  *   2) If found, sets the OAS enabled flag if not set and returns.
6702  *   3) Otherwise, creates a device data structure.
6703  *   4) If successfully created, marks the device data as belonging to an OAS
6704  *   lun, marks the lun as not available, and adds it to the list of luns.
6705  *
6706  * Return codes:
6707  *   false - Error
6708  *   true - Success
6709  **/
6710 bool
6711 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6712 		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6713 {
6714 
6715 	struct lpfc_device_data *lun_info;
6716 	unsigned long flags;
6717 
6718 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6719 	    !phba->cfg_fof)
6720 		return false;
6721 
6722 	spin_lock_irqsave(&phba->devicelock, flags);
6723 
6724 	/* Check to see if the device data for the lun has been created */
6725 	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6726 					  target_wwpn, lun);
6727 	if (lun_info) {
6728 		if (!lun_info->oas_enabled)
6729 			lun_info->oas_enabled = true;
6730 		lun_info->priority = pri;
6731 		spin_unlock_irqrestore(&phba->devicelock, flags);
6732 		return true;
6733 	}
6734 
6735 	/* Create a lun info structure and add it to the list of luns */
6736 	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6737 					   pri, true);
6738 	if (lun_info) {
6739 		lun_info->oas_enabled = true;
6740 		lun_info->priority = pri;
6741 		lun_info->available = false;
6742 		list_add_tail(&lun_info->listentry, &phba->luns);
6743 		spin_unlock_irqrestore(&phba->devicelock, flags);
6744 		return true;
6745 	}
6746 	spin_unlock_irqrestore(&phba->devicelock, flags);
6747 	return false;
6748 }
6749 
6750 /**
6751  * lpfc_disable_oas_lun - disables a lun for OAS operations
6752  * @phba: Pointer to host bus adapter structure.
6753  * @vport_wwpn: Pointer to vport's wwpn information
6754  * @target_wwpn: Pointer to target's wwpn information
6755  * @lun: Lun
6756  * @pri: Priority
6757  *
6758  * This routine disables a lun for oas operations.  The routine does so by
6759  * doing the following:
6760  *
6761  *   1) Checks to see if the device data for the lun is created.
6762  *   2) If present, clears the flag indicating this lun is for OAS.
6763  *   3) If the lun is not currently in use by the system, the device data
6764  *   is freed.
6765  *
6766  * Return codes:
6767  *   false - Error
6768  *   true - Success
6769  **/
6770 bool
6771 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6772 		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6773 {
6774 
6775 	struct lpfc_device_data *lun_info;
6776 	unsigned long flags;
6777 
6778 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6779 	    !phba->cfg_fof)
6780 		return false;
6781 
6782 	spin_lock_irqsave(&phba->devicelock, flags);
6783 
6784 	/* Check to see if the device data for the lun has been created */
6785 	lun_info = __lpfc_get_device_data(phba,
6786 					  &phba->luns, vport_wwpn,
6787 					  target_wwpn, lun);
6788 	if (lun_info) {
6789 		lun_info->oas_enabled = false;
6790 		lun_info->priority = pri;
6791 		if (!lun_info->available)
6792 			lpfc_delete_device_data(phba, lun_info);
6793 		spin_unlock_irqrestore(&phba->devicelock, flags);
6794 		return true;
6795 	}
6796 
6797 	spin_unlock_irqrestore(&phba->devicelock, flags);
6798 	return false;
6799 }
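
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller pairing
 * lpfc_enable_oas_lun() with lpfc_disable_oas_lun().  The helper name
 * example_oas_toggle() is an assumption for illustration only; in practice
 * these routines are expected to be invoked from the driver's OAS management
 * interfaces.
 *
 *	static void example_oas_toggle(struct lpfc_hba *phba,
 *				       struct lpfc_name *vport_wwpn,
 *				       struct lpfc_name *target_wwpn,
 *				       uint64_t lun, uint8_t pri)
 *	{
 *		if (!lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn,
 *					 lun, pri))
 *			return;
 *
 *		lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun, pri);
 *	}
 */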
6800 
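/*
 * Stub handlers for the NVMe-only host template below: lpfc_no_command()
 * rejects any SCSI command with SCSI_MLQUEUE_HOST_BUSY, and lpfc_no_slave()
 * fails slave_alloc/slave_configure with -ENODEV so that no SCSI devices are
 * created on such a host.
 */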
6801 static int
6802 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
6803 {
6804 	return SCSI_MLQUEUE_HOST_BUSY;
6805 }
6806 
6807 static int
6808 lpfc_no_slave(struct scsi_device *sdev)
6809 {
6810 	return -ENODEV;
6811 }
6812 
6813 struct scsi_host_template lpfc_template_nvme = {
6814 	.module			= THIS_MODULE,
6815 	.name			= LPFC_DRIVER_NAME,
6816 	.proc_name		= LPFC_DRIVER_NAME,
6817 	.info			= lpfc_info,
6818 	.queuecommand		= lpfc_no_command,
6819 	.slave_alloc		= lpfc_no_slave,
6820 	.slave_configure	= lpfc_no_slave,
6821 	.scan_finished		= lpfc_scan_finished,
6822 	.this_id		= -1,
6823 	.sg_tablesize		= 1,
6824 	.cmd_per_lun		= 1,
6825 	.shost_groups		= lpfc_hba_groups,
6826 	.max_sectors		= 0xFFFFFFFF,
6827 	.vendor_id		= LPFC_NL_VENDOR_ID,
6828 	.track_queue_depth	= 0,
6829 };
6830 
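/*
 * Full SCSI host template: unlike lpfc_template_nvme above, this wires up the
 * real queuecommand, error-handling and slave_* entry points implemented in
 * this file.
 */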
6831 struct scsi_host_template lpfc_template = {
6832 	.module			= THIS_MODULE,
6833 	.name			= LPFC_DRIVER_NAME,
6834 	.proc_name		= LPFC_DRIVER_NAME,
6835 	.info			= lpfc_info,
6836 	.queuecommand		= lpfc_queuecommand,
6837 	.eh_timed_out		= fc_eh_timed_out,
6838 	.eh_should_retry_cmd    = fc_eh_should_retry_cmd,
6839 	.eh_abort_handler	= lpfc_abort_handler,
6840 	.eh_device_reset_handler = lpfc_device_reset_handler,
6841 	.eh_target_reset_handler = lpfc_target_reset_handler,
6842 	.eh_host_reset_handler  = lpfc_host_reset_handler,
6843 	.slave_alloc		= lpfc_slave_alloc,
6844 	.slave_configure	= lpfc_slave_configure,
6845 	.slave_destroy		= lpfc_slave_destroy,
6846 	.scan_finished		= lpfc_scan_finished,
6847 	.this_id		= -1,
6848 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
6849 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
6850 	.shost_groups		= lpfc_hba_groups,
6851 	.max_sectors		= 0xFFFFFFFF,
6852 	.vendor_id		= LPFC_NL_VENDOR_ID,
6853 	.change_queue_depth	= scsi_change_queue_depth,
6854 	.track_queue_depth	= 1,
6855 };
6856