1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11 
12 #include <scsi/scsi_tcq.h>
13 
14 static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
15 
16 static void qla25xx_set_que(srb_t *, struct rsp_que **);
17 /**
18  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
19  * @sp: SCSI command
20  *
21  * Returns the proper CF_* direction based on CDB.
22  */
23 static inline uint16_t
24 qla2x00_get_cmd_direction(srb_t *sp)
25 {
26 	uint16_t cflags;
27 
28 	cflags = 0;
29 
30 	/* Set transfer direction */
31 	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 		cflags = CF_WRITE;
33 		sp->fcport->vha->hw->qla_stats.output_bytes +=
34 		    scsi_bufflen(sp->cmd);
35 	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 		cflags = CF_READ;
37 		sp->fcport->vha->hw->qla_stats.input_bytes +=
38 		    scsi_bufflen(sp->cmd);
39 	}
40 	return (cflags);
41 }
42 
43 /**
44  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45  * Continuation Type 0 IOCBs to allocate.
46  *
47  * @dsds: number of data segment descriptors needed
48  *
49  * Returns the number of IOCB entries needed to store @dsds.
50  */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
53 {
54 	uint16_t iocbs;
55 
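	/*
	 * A Command Type 2 IOCB holds up to 3 DSDs and each Continuation
	 * Type 0 IOCB holds up to 7 more.  For example, 12 DSDs need
	 * 1 + ceil((12 - 3) / 7) = 3 IOCB entries.
	 */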
56 	iocbs = 1;
57 	if (dsds > 3) {
58 		iocbs += (dsds - 3) / 7;
59 		if ((dsds - 3) % 7)
60 			iocbs++;
61 	}
62 	return (iocbs);
63 }
64 
65 /**
66  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67  * Continuation Type 1 IOCBs to allocate.
68  *
69  * @dsds: number of data segment descriptors needed
70  *
71  * Returns the number of IOCB entries needed to store @dsds.
72  */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
75 {
76 	uint16_t iocbs;
77 
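	/*
	 * A Command Type 3 IOCB holds up to 2 DSDs and each Continuation
	 * Type 1 IOCB holds up to 5 more.  For example, 12 DSDs need
	 * 1 + ceil((12 - 2) / 5) = 3 IOCB entries.
	 */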
78 	iocbs = 1;
79 	if (dsds > 2) {
80 		iocbs += (dsds - 2) / 5;
81 		if ((dsds - 2) % 5)
82 			iocbs++;
83 	}
84 	return (iocbs);
85 }
86 
87 /**
88  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89  * @vha: HA context
90  *
91  * Returns a pointer to the Continuation Type 0 IOCB packet.
92  */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 {
96 	cont_entry_t *cont_pkt;
97 	struct req_que *req = vha->req;
98 	/* Adjust ring index. */
99 	req->ring_index++;
100 	if (req->ring_index == req->length) {
101 		req->ring_index = 0;
102 		req->ring_ptr = req->ring;
103 	} else {
104 		req->ring_ptr++;
105 	}
106 
107 	cont_pkt = (cont_entry_t *)req->ring_ptr;
108 
109 	/* Load packet defaults. */
110 	*((uint32_t *)(&cont_pkt->entry_type)) =
111 	    __constant_cpu_to_le32(CONTINUE_TYPE);
112 
113 	return (cont_pkt);
114 }
115 
116 /**
117  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118  * @vha: HA context
119  *
120  * Returns a pointer to the continuation type 1 IOCB packet.
121  */
122 static inline cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124 {
125 	cont_a64_entry_t *cont_pkt;
126 
127 	struct req_que *req = vha->req;
128 	/* Adjust ring index. */
129 	req->ring_index++;
130 	if (req->ring_index == req->length) {
131 		req->ring_index = 0;
132 		req->ring_ptr = req->ring;
133 	} else {
134 		req->ring_ptr++;
135 	}
136 
137 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138 
139 	/* Load packet defaults. */
140 	*((uint32_t *)(&cont_pkt->entry_type)) =
141 	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
142 
143 	return (cont_pkt);
144 }
145 
146 static inline int
147 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148 {
149 	uint8_t	guard = scsi_host_get_guard(sp->cmd->device->host);
150 
151 	/* We only support T10 DIF right now */
152 	if (guard != SHOST_DIX_GUARD_CRC) {
153 		DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
154 		return 0;
155 	}
156 
157 	/* We always use DIFF Bundling for best performance */
158 	*fw_prot_opts = 0;
159 
160 	/* Translate SCSI opcode to a protection opcode */
161 	switch (scsi_get_prot_op(sp->cmd)) {
162 	case SCSI_PROT_READ_STRIP:
163 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
164 		break;
165 	case SCSI_PROT_WRITE_INSERT:
166 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
167 		break;
168 	case SCSI_PROT_READ_INSERT:
169 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
170 		break;
171 	case SCSI_PROT_WRITE_STRIP:
172 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
173 		break;
174 	case SCSI_PROT_READ_PASS:
175 		*fw_prot_opts |= PO_MODE_DIF_PASS;
176 		break;
177 	case SCSI_PROT_WRITE_PASS:
178 		*fw_prot_opts |= PO_MODE_DIF_PASS;
179 		break;
180 	default:	/* Normal Request */
181 		*fw_prot_opts |= PO_MODE_DIF_PASS;
182 		break;
183 	}
184 
185 	return scsi_prot_sg_count(sp->cmd);
186 }
187 
188 /**
189  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
190  * capable IOCB types.
191  *
192  * @sp: SRB command to process
193  * @cmd_pkt: Command type 2 IOCB
194  * @tot_dsds: Total number of segments to transfer
195  */
196 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
197     uint16_t tot_dsds)
198 {
199 	uint16_t	avail_dsds;
200 	uint32_t	*cur_dsd;
201 	scsi_qla_host_t	*vha;
202 	struct scsi_cmnd *cmd;
203 	struct scatterlist *sg;
204 	int i;
205 
206 	cmd = sp->cmd;
207 
208 	/* Update entry type to indicate Command Type 2 IOCB */
209 	*((uint32_t *)(&cmd_pkt->entry_type)) =
210 	    __constant_cpu_to_le32(COMMAND_TYPE);
211 
212 	/* No data transfer */
213 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
214 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
215 		return;
216 	}
217 
218 	vha = sp->fcport->vha;
219 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
220 
221 	/* Three DSDs are available in the Command Type 2 IOCB */
222 	avail_dsds = 3;
223 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
224 
225 	/* Load data segments */
226 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
227 		cont_entry_t *cont_pkt;
228 
229 		/* Allocate additional continuation packets? */
230 		if (avail_dsds == 0) {
231 			/*
232 			 * Seven DSDs are available in the Continuation
233 			 * Type 0 IOCB.
234 			 */
235 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
236 			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
237 			avail_dsds = 7;
238 		}
239 
240 		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
241 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
242 		avail_dsds--;
243 	}
244 }
245 
246 /**
247  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
248  * capable IOCB types.
249  *
250  * @sp: SRB command to process
251  * @cmd_pkt: Command type 3 IOCB
252  * @tot_dsds: Total number of segments to transfer
253  */
254 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
255     uint16_t tot_dsds)
256 {
257 	uint16_t	avail_dsds;
258 	uint32_t	*cur_dsd;
259 	scsi_qla_host_t	*vha;
260 	struct scsi_cmnd *cmd;
261 	struct scatterlist *sg;
262 	int i;
263 
264 	cmd = sp->cmd;
265 
266 	/* Update entry type to indicate Command Type 3 IOCB */
267 	*((uint32_t *)(&cmd_pkt->entry_type)) =
268 	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
269 
270 	/* No data transfer */
271 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
272 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
273 		return;
274 	}
275 
276 	vha = sp->fcport->vha;
277 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
278 
279 	/* Two DSDs are available in the Command Type 3 IOCB */
280 	avail_dsds = 2;
281 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
282 
283 	/* Load data segments */
284 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
285 		dma_addr_t	sle_dma;
286 		cont_a64_entry_t *cont_pkt;
287 
288 		/* Allocate additional continuation packets? */
289 		if (avail_dsds == 0) {
290 			/*
291 			 * Five DSDs are available in the Continuation
292 			 * Type 1 IOCB.
293 			 */
294 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
295 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
296 			avail_dsds = 5;
297 		}
298 
299 		sle_dma = sg_dma_address(sg);
300 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
301 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
302 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
303 		avail_dsds--;
304 	}
305 }
306 
307 /**
308  * qla2x00_start_scsi() - Send a SCSI command to the ISP
309  * @sp: command to send to the ISP
310  *
311  * Returns non-zero if a failure occurred, else zero.
312  */
313 int
314 qla2x00_start_scsi(srb_t *sp)
315 {
316 	int		ret, nseg;
317 	unsigned long   flags;
318 	scsi_qla_host_t	*vha;
319 	struct scsi_cmnd *cmd;
320 	uint32_t	*clr_ptr;
321 	uint32_t        index;
322 	uint32_t	handle;
323 	cmd_entry_t	*cmd_pkt;
324 	uint16_t	cnt;
325 	uint16_t	req_cnt;
326 	uint16_t	tot_dsds;
327 	struct device_reg_2xxx __iomem *reg;
328 	struct qla_hw_data *ha;
329 	struct req_que *req;
330 	struct rsp_que *rsp;
331 	char		tag[2];
332 
333 	/* Setup device pointers. */
334 	ret = 0;
335 	vha = sp->fcport->vha;
336 	ha = vha->hw;
337 	reg = &ha->iobase->isp;
338 	cmd = sp->cmd;
339 	req = ha->req_q_map[0];
340 	rsp = ha->rsp_q_map[0];
341 	/* So we know we haven't pci_map'ed anything yet */
342 	tot_dsds = 0;
343 
344 	/* Send marker if required */
345 	if (vha->marker_needed != 0) {
346 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
347 							!= QLA_SUCCESS)
348 			return (QLA_FUNCTION_FAILED);
349 		vha->marker_needed = 0;
350 	}
351 
352 	/* Acquire ring specific lock */
353 	spin_lock_irqsave(&ha->hardware_lock, flags);
354 
355 	/* Check for room in outstanding command list. */
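	/*
	 * The search runs circularly, starting just past the last handle
	 * issued.  Slot 0 is never assigned, so index reaching
	 * MAX_OUTSTANDING_COMMANDS below means every usable slot is busy.
	 */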
356 	handle = req->current_outstanding_cmd;
357 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
358 		handle++;
359 		if (handle == MAX_OUTSTANDING_COMMANDS)
360 			handle = 1;
361 		if (!req->outstanding_cmds[handle])
362 			break;
363 	}
364 	if (index == MAX_OUTSTANDING_COMMANDS)
365 		goto queuing_error;
366 
367 	/* Map the sg table so we have an accurate count of sg entries needed */
368 	if (scsi_sg_count(cmd)) {
369 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
370 		    scsi_sg_count(cmd), cmd->sc_data_direction);
371 		if (unlikely(!nseg))
372 			goto queuing_error;
373 	} else
374 		nseg = 0;
375 
376 	tot_dsds = nseg;
377 
378 	/* Calculate the number of request entries needed. */
379 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
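	/*
	 * If the cached free-entry count looks insufficient, refresh it
	 * from the hardware request-out pointer; the "+ 2" keeps a small
	 * cushion between the ring-in and ring-out indexes.
	 */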
380 	if (req->cnt < (req_cnt + 2)) {
381 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
382 		if (req->ring_index < cnt)
383 			req->cnt = cnt - req->ring_index;
384 		else
385 			req->cnt = req->length -
386 			    (req->ring_index - cnt);
387 	}
388 	if (req->cnt < (req_cnt + 2))
389 		goto queuing_error;
390 
391 	/* Build command packet */
392 	req->current_outstanding_cmd = handle;
393 	req->outstanding_cmds[handle] = sp;
394 	sp->handle = handle;
395 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
396 	req->cnt -= req_cnt;
397 
398 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
399 	cmd_pkt->handle = handle;
400 	/* Zero out remaining portion of packet. */
401 	clr_ptr = (uint32_t *)cmd_pkt + 2;
402 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
403 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
404 
405 	/* Set target ID and LUN number*/
406 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
407 	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
408 
409 	/* Update tagged queuing modifier */
410 	if (scsi_populate_tag_msg(cmd, tag)) {
411 		switch (tag[0]) {
412 		case HEAD_OF_QUEUE_TAG:
413 			cmd_pkt->control_flags =
414 			    __constant_cpu_to_le16(CF_HEAD_TAG);
415 			break;
416 		case ORDERED_QUEUE_TAG:
417 			cmd_pkt->control_flags =
418 			    __constant_cpu_to_le16(CF_ORDERED_TAG);
419 			break;
420 		default:
421 			cmd_pkt->control_flags =
422 			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
423 			break;
424 		}
425 	}
426 
427 	/* Load SCSI command packet. */
428 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
429 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
430 
431 	/* Build IOCB segments */
432 	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
433 
434 	/* Set total data segment count. */
435 	cmd_pkt->entry_count = (uint8_t)req_cnt;
436 	wmb();
437 
438 	/* Adjust ring index. */
439 	req->ring_index++;
440 	if (req->ring_index == req->length) {
441 		req->ring_index = 0;
442 		req->ring_ptr = req->ring;
443 	} else
444 		req->ring_ptr++;
445 
446 	sp->flags |= SRB_DMA_VALID;
447 
448 	/* Set chip new ring index. */
449 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
450 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
451 
452 	/* Manage unprocessed RIO/ZIO commands in response queue. */
453 	if (vha->flags.process_response_queue &&
454 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
455 		qla2x00_process_response_queue(rsp);
456 
457 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
458 	return (QLA_SUCCESS);
459 
460 queuing_error:
461 	if (tot_dsds)
462 		scsi_dma_unmap(cmd);
463 
464 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
465 
466 	return (QLA_FUNCTION_FAILED);
467 }
468 
469 /**
470  * qla2x00_marker() - Send a marker IOCB to the firmware.
471  * @vha: HA context
472  * @loop_id: loop ID
473  * @lun: LUN
474  * @type: marker modifier
475  *
476  * Can be called from both normal and interrupt context.
477  *
478  * Returns non-zero if a failure occurred, else zero.
479  */
480 static int
481 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
482 			struct rsp_que *rsp, uint16_t loop_id,
483 			uint16_t lun, uint8_t type)
484 {
485 	mrk_entry_t *mrk;
486 	struct mrk_entry_24xx *mrk24;
487 	struct qla_hw_data *ha = vha->hw;
488 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
489 
490 	mrk24 = NULL;
491 	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
492 	if (mrk == NULL) {
493 		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
494 		    __func__, base_vha->host_no));
495 
496 		return (QLA_FUNCTION_FAILED);
497 	}
498 
499 	mrk->entry_type = MARKER_TYPE;
500 	mrk->modifier = type;
501 	if (type != MK_SYNC_ALL) {
502 		if (IS_FWI2_CAPABLE(ha)) {
503 			mrk24 = (struct mrk_entry_24xx *) mrk;
504 			mrk24->nport_handle = cpu_to_le16(loop_id);
505 			mrk24->lun[1] = LSB(lun);
506 			mrk24->lun[2] = MSB(lun);
507 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
508 			mrk24->vp_index = vha->vp_idx;
509 			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
510 		} else {
511 			SET_TARGET_ID(ha, mrk->target, loop_id);
512 			mrk->lun = cpu_to_le16(lun);
513 		}
514 	}
515 	wmb();
516 
517 	qla2x00_isp_cmd(vha, req);
518 
519 	return (QLA_SUCCESS);
520 }
521 
522 int
523 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
524 		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
525 		uint8_t type)
526 {
527 	int ret;
528 	unsigned long flags = 0;
529 
530 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
531 	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
532 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
533 
534 	return (ret);
535 }
536 
537 /**
538  * qla2x00_isp_cmd() - Modify the request ring pointer.
539  * @vha: HA context
540  *
541  * Note: The caller must hold the hardware lock before calling this routine.
542  */
543 static void
544 qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
545 {
546 	struct qla_hw_data *ha = vha->hw;
547 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
548 	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
549 
550 	DEBUG5(printk("%s(): IOCB data:\n", __func__));
551 	DEBUG5(qla2x00_dump_buffer(
552 	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
553 
554 	/* Adjust ring index. */
555 	req->ring_index++;
556 	if (req->ring_index == req->length) {
557 		req->ring_index = 0;
558 		req->ring_ptr = req->ring;
559 	} else
560 		req->ring_ptr++;
561 
562 	/* Set chip new ring index. */
563 	if (IS_QLA82XX(ha)) {
564 		uint32_t dbval = 0x04 | (ha->portnum << 5);
565 
566 		/* write, read and verify logic */
567 		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
568 		if (ql2xdbwr)
569 			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
570 		else {
571 			WRT_REG_DWORD(
572 				(unsigned long __iomem *)ha->nxdb_wr_ptr,
573 				dbval);
574 			wmb();
575 			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
576 				WRT_REG_DWORD((unsigned long __iomem *)
577 					ha->nxdb_wr_ptr, dbval);
578 				wmb();
579 			}
580 		}
581 	} else if (ha->mqenable) {
582 		/* Set chip new ring index. */
583 		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
584 		RD_REG_DWORD(&ioreg->hccr);
585 	} else {
586 		if (IS_FWI2_CAPABLE(ha)) {
587 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
588 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
589 		} else {
590 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
591 				req->ring_index);
592 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
593 		}
594 	}
595 
596 }
597 
598 /**
599  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
600  * Continuation Type 1 IOCBs to allocate.
601  *
602  * @dsds: number of data segment descriptors needed
603  *
604  * Returns the number of IOCB entries needed to store @dsds.
605  */
606 inline uint16_t
607 qla24xx_calc_iocbs(uint16_t dsds)
608 {
609 	uint16_t iocbs;
610 
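	/*
	 * The command IOCB carries a single DSD; each Continuation Type 1
	 * IOCB carries up to 5 more.  For example, 11 DSDs need
	 * 1 + ceil((11 - 1) / 5) = 3 IOCB entries.
	 */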
611 	iocbs = 1;
612 	if (dsds > 1) {
613 		iocbs += (dsds - 1) / 5;
614 		if ((dsds - 1) % 5)
615 			iocbs++;
616 	}
617 	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
618 	    __func__, iocbs));
619 	return iocbs;
620 }
621 
622 /**
623  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
624  * IOCB types.
625  *
626  * @sp: SRB command to process
627  * @cmd_pkt: Command Type 7 IOCB
628  * @tot_dsds: Total number of segments to transfer
629  */
630 inline void
631 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
632     uint16_t tot_dsds)
633 {
634 	uint16_t	avail_dsds;
635 	uint32_t	*cur_dsd;
636 	scsi_qla_host_t	*vha;
637 	struct scsi_cmnd *cmd;
638 	struct scatterlist *sg;
639 	int i;
640 	struct req_que *req;
641 
642 	cmd = sp->cmd;
643 
644 	/* Update entry type to indicate Command Type 7 IOCB */
645 	*((uint32_t *)(&cmd_pkt->entry_type)) =
646 	    __constant_cpu_to_le32(COMMAND_TYPE_7);
647 
648 	/* No data transfer */
649 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
650 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
651 		return;
652 	}
653 
654 	vha = sp->fcport->vha;
655 	req = vha->req;
656 
657 	/* Set transfer direction */
658 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
659 		cmd_pkt->task_mgmt_flags =
660 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
661 		sp->fcport->vha->hw->qla_stats.output_bytes +=
662 		    scsi_bufflen(sp->cmd);
663 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
664 		cmd_pkt->task_mgmt_flags =
665 		    __constant_cpu_to_le16(TMF_READ_DATA);
666 		sp->fcport->vha->hw->qla_stats.input_bytes +=
667 		    scsi_bufflen(sp->cmd);
668 	}
669 
670 	/* One DSD is available in the Command Type 7 IOCB */
671 	avail_dsds = 1;
672 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
673 
674 	/* Load data segments */
675 
676 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
677 		dma_addr_t	sle_dma;
678 		cont_a64_entry_t *cont_pkt;
679 
680 		/* Allocate additional continuation packets? */
681 		if (avail_dsds == 0) {
682 			/*
683 			 * Five DSDs are available in the Continuation
684 			 * Type 1 IOCB.
685 			 */
686 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
687 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
688 			avail_dsds = 5;
689 		}
690 
691 		sle_dma = sg_dma_address(sg);
692 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
693 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
694 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
695 		avail_dsds--;
696 	}
697 }
698 
699 struct fw_dif_context {
700 	uint32_t ref_tag;
701 	uint16_t app_tag;
702 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
703 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
704 };
705 
706 /*
707  * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
708  *
709  */
710 static inline void
711 qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
712     unsigned int protcnt)
713 {
714 	struct sd_dif_tuple *spt;
715 	unsigned char op = scsi_get_prot_op(cmd);
716 
717 	switch (scsi_get_prot_type(cmd)) {
718 	/* For TYPE 0 protection: no checking */
719 	case SCSI_PROT_DIF_TYPE0:
720 		pkt->ref_tag_mask[0] = 0x00;
721 		pkt->ref_tag_mask[1] = 0x00;
722 		pkt->ref_tag_mask[2] = 0x00;
723 		pkt->ref_tag_mask[3] = 0x00;
724 		break;
725 
726 	/*
727 	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
728 	 * match LBA in CDB + N
729 	 */
730 	case SCSI_PROT_DIF_TYPE2:
731 		if (!ql2xenablehba_err_chk)
732 			break;
733 
734 		if (scsi_prot_sg_count(cmd)) {
735 			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
736 			    scsi_prot_sglist(cmd)[0].offset;
737 			pkt->app_tag = swab32(spt->app_tag);
738 			pkt->app_tag_mask[0] =  0xff;
739 			pkt->app_tag_mask[1] =  0xff;
740 		}
741 
742 		pkt->ref_tag = cpu_to_le32((uint32_t)
743 		    (0xffffffff & scsi_get_lba(cmd)));
744 
745 		/* enable ALL bytes of the ref tag */
746 		pkt->ref_tag_mask[0] = 0xff;
747 		pkt->ref_tag_mask[1] = 0xff;
748 		pkt->ref_tag_mask[2] = 0xff;
749 		pkt->ref_tag_mask[3] = 0xff;
750 		break;
751 
752 	/* For Type 3 protection: 16 bit GUARD only */
753 	case SCSI_PROT_DIF_TYPE3:
754 		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
755 			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
756 								0x00;
757 		break;
758 
759 	/*
760 	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
761 	 * 16 bit app tag.
762 	 */
763 	case SCSI_PROT_DIF_TYPE1:
764 		if (!ql2xenablehba_err_chk)
765 			break;
766 
767 		if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
768 		    op == SCSI_PROT_WRITE_PASS)) {
769 			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
770 			    scsi_prot_sglist(cmd)[0].offset;
771 			DEBUG18(printk(KERN_DEBUG
772 			    "%s(): LBA from user %p, lba = 0x%x\n",
773 			    __func__, spt, (int)spt->ref_tag));
774 			pkt->ref_tag = swab32(spt->ref_tag);
775 			pkt->app_tag_mask[0] = 0x0;
776 			pkt->app_tag_mask[1] = 0x0;
777 		} else {
778 			pkt->ref_tag = cpu_to_le32((uint32_t)
779 			    (0xffffffff & scsi_get_lba(cmd)));
780 			pkt->app_tag = __constant_cpu_to_le16(0);
781 			pkt->app_tag_mask[0] = 0x0;
782 			pkt->app_tag_mask[1] = 0x0;
783 		}
784 		/* enable ALL bytes of the ref tag */
785 		pkt->ref_tag_mask[0] = 0xff;
786 		pkt->ref_tag_mask[1] = 0xff;
787 		pkt->ref_tag_mask[2] = 0xff;
788 		pkt->ref_tag_mask[3] = 0xff;
789 		break;
790 	}
791 
792 	DEBUG18(printk(KERN_DEBUG
793 	    "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
794 	    " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
795 	    " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
796 	    (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
797 }
798 
799 
800 static int
801 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
802 	uint16_t tot_dsds)
803 {
804 	void *next_dsd;
805 	uint8_t avail_dsds = 0;
806 	uint32_t dsd_list_len;
807 	struct dsd_dma *dsd_ptr;
808 	struct scatterlist *sg;
809 	uint32_t *cur_dsd = dsd;
810 	int	i;
811 	uint16_t	used_dsds = tot_dsds;
812 
813 	uint8_t		*cp;
814 
815 	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
816 		dma_addr_t	sle_dma;
817 
818 		/* Allocate additional continuation packets? */
819 		if (avail_dsds == 0) {
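			/*
			 * Each DSD entry is 12 bytes (two address words plus
			 * a length word); the extra "+ 1" slot below holds
			 * either the chain entry pointing at the next list
			 * or the null terminator written at the end.
			 */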
820 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
821 					QLA_DSDS_PER_IOCB : used_dsds;
822 			dsd_list_len = (avail_dsds + 1) * 12;
823 			used_dsds -= avail_dsds;
824 
825 			/* allocate tracking DS */
826 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
827 			if (!dsd_ptr)
828 				return 1;
829 
830 			/* allocate new list */
831 			dsd_ptr->dsd_addr = next_dsd =
832 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
833 				&dsd_ptr->dsd_list_dma);
834 
835 			if (!next_dsd) {
836 				/*
837 				 * Need to cleanup only this dsd_ptr, rest
838 				 * will be done by sp_free_dma()
839 				 */
840 				kfree(dsd_ptr);
841 				return 1;
842 			}
843 
844 			list_add_tail(&dsd_ptr->list,
845 			    &((struct crc_context *)sp->ctx)->dsd_list);
846 
847 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
848 
849 			/* add new list to cmd iocb or last list */
850 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
851 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
852 			*cur_dsd++ = dsd_list_len;
853 			cur_dsd = (uint32_t *)next_dsd;
854 		}
855 		sle_dma = sg_dma_address(sg);
856 		DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
857 		    " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
858 		    MSD(sle_dma), sg_dma_len(sg)));
859 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
860 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
861 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
862 		avail_dsds--;
863 
864 		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
865 			cp = page_address(sg_page(sg)) + sg->offset;
866 			DEBUG18(printk("%s(): User Data buffer= %p:\n",
867 			    __func__ , cp));
868 		}
869 	}
870 	/* Null termination */
871 	*cur_dsd++ = 0;
872 	*cur_dsd++ = 0;
873 	*cur_dsd++ = 0;
874 	return 0;
875 }
876 
877 static int
878 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
879 							uint32_t *dsd,
880 	uint16_t tot_dsds)
881 {
882 	void *next_dsd;
883 	uint8_t avail_dsds = 0;
884 	uint32_t dsd_list_len;
885 	struct dsd_dma *dsd_ptr;
886 	struct scatterlist *sg;
887 	int	i;
888 	struct scsi_cmnd *cmd;
889 	uint32_t *cur_dsd = dsd;
890 	uint16_t	used_dsds = tot_dsds;
891 
892 	uint8_t		*cp;
893 
894 
895 	cmd = sp->cmd;
896 	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
897 		dma_addr_t	sle_dma;
898 
899 		/* Allocate additional continuation packets? */
900 		if (avail_dsds == 0) {
901 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
902 						QLA_DSDS_PER_IOCB : used_dsds;
903 			dsd_list_len = (avail_dsds + 1) * 12;
904 			used_dsds -= avail_dsds;
905 
906 			/* allocate tracking DS */
907 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
908 			if (!dsd_ptr)
909 				return 1;
910 
911 			/* allocate new list */
912 			dsd_ptr->dsd_addr = next_dsd =
913 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
914 				&dsd_ptr->dsd_list_dma);
915 
916 			if (!next_dsd) {
917 				/*
918 				 * Need to cleanup only this dsd_ptr, rest
919 				 * will be done by sp_free_dma()
920 				 */
921 				kfree(dsd_ptr);
922 				return 1;
923 			}
924 
925 			list_add_tail(&dsd_ptr->list,
926 			    &((struct crc_context *)sp->ctx)->dsd_list);
927 
928 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
929 
930 			/* add new list to cmd iocb or last list */
931 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
932 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
933 			*cur_dsd++ = dsd_list_len;
934 			cur_dsd = (uint32_t *)next_dsd;
935 		}
936 		sle_dma = sg_dma_address(sg);
937 		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
938 			DEBUG18(printk(KERN_DEBUG
939 			    "%s(): %p, sg entry %d - addr =0x%x"
940 			    " 0x%x, len =%d\n", __func__ , cur_dsd, i,
941 			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
942 		}
943 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
944 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
945 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
946 
947 		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
948 			cp = page_address(sg_page(sg)) + sg->offset;
949 			DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
950 			    __func__ , cp));
951 		}
952 		avail_dsds--;
953 	}
954 	/* Null termination */
955 	*cur_dsd++ = 0;
956 	*cur_dsd++ = 0;
957 	*cur_dsd++ = 0;
958 	return 0;
959 }
960 
961 /**
962  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
963  *							Type 6 IOCB types.
964  *
965  * @sp: SRB command to process
966  * @cmd_pkt: Command Type CRC_2 IOCB
967  * @tot_dsds: Total number of segments to transfer
968  */
969 static inline int
970 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
971     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
972 {
973 	uint32_t		*cur_dsd, *fcp_dl;
974 	scsi_qla_host_t		*vha;
975 	struct scsi_cmnd	*cmd;
976 	struct scatterlist	*cur_seg;
977 	int			sgc;
978 	uint32_t		total_bytes;
979 	uint32_t		data_bytes;
980 	uint32_t		dif_bytes;
981 	uint8_t			bundling = 1;
982 	uint16_t		blk_size;
983 	uint8_t			*clr_ptr;
984 	struct crc_context	*crc_ctx_pkt = NULL;
985 	struct qla_hw_data	*ha;
986 	uint8_t			additional_fcpcdb_len;
987 	uint16_t		fcp_cmnd_len;
988 	struct fcp_cmnd		*fcp_cmnd;
989 	dma_addr_t		crc_ctx_dma;
990 	char			tag[2];
991 
992 	cmd = sp->cmd;
993 
994 	sgc = 0;
995 	/* Update entry type to indicate Command Type CRC_2 IOCB */
996 	*((uint32_t *)(&cmd_pkt->entry_type)) =
997 	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
998 
999 	/* No data transfer */
1000 	data_bytes = scsi_bufflen(cmd);
1001 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1002 		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1003 		    __func__, data_bytes));
1004 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1005 		return QLA_SUCCESS;
1006 	}
1007 
1008 	vha = sp->fcport->vha;
1009 	ha = vha->hw;
1010 
1011 	DEBUG18(printk(KERN_DEBUG
1012 	    "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
1013 	    vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
1014 
1015 	cmd_pkt->vp_index = sp->fcport->vp_idx;
1016 
1017 	/* Set transfer direction */
1018 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1019 		cmd_pkt->control_flags =
1020 		    __constant_cpu_to_le16(CF_WRITE_DATA);
1021 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1022 		cmd_pkt->control_flags =
1023 		    __constant_cpu_to_le16(CF_READ_DATA);
1024 	}
1025 
1026 	tot_prot_dsds = scsi_prot_sg_count(cmd);
1027 	if (!tot_prot_dsds)
1028 		bundling = 0;
1029 
1030 	/* Allocate CRC context from global pool */
1031 	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
1032 	    GFP_ATOMIC, &crc_ctx_dma);
1033 
1034 	if (!crc_ctx_pkt)
1035 		goto crc_queuing_error;
1036 
1037 	/* Zero out CTX area. */
1038 	clr_ptr = (uint8_t *)crc_ctx_pkt;
1039 	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1040 
1041 	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1042 
1043 	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1044 
1045 	/* Set handle */
1046 	crc_ctx_pkt->handle = cmd_pkt->handle;
1047 
1048 	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1049 
1050 	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
1051 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1052 
1053 	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1054 	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1055 	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1056 
1057 	/* Determine SCSI command length -- align to 4 byte boundary */
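	/*
	 * fcp_cmnd_len = 12-byte FCP_CMND header (8-byte LUN plus control
	 * fields) + CDB bytes (16, plus any additional CDB bytes, which
	 * must be a 4-byte multiple) + 4-byte FCP_DL field.
	 */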
1058 	if (cmd->cmd_len > 16) {
1059 		DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
1060 		    __func__));
1061 		additional_fcpcdb_len = cmd->cmd_len - 16;
1062 		if ((cmd->cmd_len % 4) != 0) {
1063 			/* SCSI cmd > 16 bytes must be multiple of 4 */
1064 			goto crc_queuing_error;
1065 		}
1066 		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1067 	} else {
1068 		additional_fcpcdb_len = 0;
1069 		fcp_cmnd_len = 12 + 16 + 4;
1070 	}
1071 
1072 	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1073 
1074 	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1075 	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1076 		fcp_cmnd->additional_cdb_len |= 1;
1077 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1078 		fcp_cmnd->additional_cdb_len |= 2;
1079 
1080 	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
1081 	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
1082 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1083 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1084 	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1085 	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1086 	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1087 	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1088 	fcp_cmnd->task_management = 0;
1089 
1090 	/*
1091 	 * Update tagged queuing modifier if using command tag queuing
1092 	 */
1093 	if (scsi_populate_tag_msg(cmd, tag)) {
1094 		switch (tag[0]) {
1095 		case HEAD_OF_QUEUE_TAG:
1096 		    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1097 		    break;
1098 		case ORDERED_QUEUE_TAG:
1099 		    fcp_cmnd->task_attribute = TSK_ORDERED;
1100 		    break;
1101 		default:
1102 		    fcp_cmnd->task_attribute = 0;
1103 		    break;
1104 		}
1105 	} else {
1106 		fcp_cmnd->task_attribute = 0;
1107 	}
1108 
1109 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1110 
1111 	DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
1112 	    " entries %d, data bytes %d, Protection entries %d\n",
1113 	    __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
1114 	    data_bytes, tot_prot_dsds));
1115 
1116 	/* Compute dif len and adjust data len to include protection */
1117 	total_bytes = data_bytes;
1118 	dif_bytes = 0;
1119 	blk_size = cmd->device->sector_size;
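	/* Each protected block adds an 8-byte DIF tuple to the wire length. */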
1120 	if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1121 		dif_bytes = (data_bytes / blk_size) * 8;
1122 		total_bytes += dif_bytes;
1123 	}
1124 
1125 	if (!ql2xenablehba_err_chk)
1126 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1127 
1128 	if (!bundling) {
1129 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1130 	} else {
1131 		/*
1132 		 * Configure Bundling if we need to fetch interleaving
1133 		 * protection PCI accesses
1134 		 */
1135 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1136 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1137 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1138 							tot_prot_dsds);
1139 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1140 	}
1141 
1142 	/* Finish the common fields of CRC pkt */
1143 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1144 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1145 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1146 	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1147 	/* Fibre channel byte count */
1148 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1149 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1150 	    additional_fcpcdb_len);
1151 	*fcp_dl = htonl(total_bytes);
1152 
1153 	DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
1154 	    " = 0x%x (%d), data block size =0x%x (%d)\n", __func__,
1155 	    vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1156 	    crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1157 
1158 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1159 		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1160 		    __func__, data_bytes));
1161 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1162 		return QLA_SUCCESS;
1163 	}
1164 	/* Walks data segments */
1165 
1166 	cmd_pkt->control_flags |=
1167 	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1168 	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1169 	    (tot_dsds - tot_prot_dsds)))
1170 		goto crc_queuing_error;
1171 
1172 	if (bundling && tot_prot_dsds) {
1173 		/* Walks dif segments */
1174 		cur_seg = scsi_prot_sglist(cmd);
1175 		cmd_pkt->control_flags |=
1176 			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1177 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1178 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1179 		    tot_prot_dsds))
1180 			goto crc_queuing_error;
1181 	}
1182 	return QLA_SUCCESS;
1183 
1184 crc_queuing_error:
1185 	DEBUG18(qla_printk(KERN_INFO, ha,
1186 	    "CMD sent FAILED crc_q error:sp = %p\n", sp));
1187 	/* Cleanup will be performed by the caller */
1188 
1189 	return QLA_FUNCTION_FAILED;
1190 }
1191 
1192 /**
1193  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1194  * @sp: command to send to the ISP
1195  *
1196  * Returns non-zero if a failure occurred, else zero.
1197  */
1198 int
1199 qla24xx_start_scsi(srb_t *sp)
1200 {
1201 	int		ret, nseg;
1202 	unsigned long   flags;
1203 	uint32_t	*clr_ptr;
1204 	uint32_t        index;
1205 	uint32_t	handle;
1206 	struct cmd_type_7 *cmd_pkt;
1207 	uint16_t	cnt;
1208 	uint16_t	req_cnt;
1209 	uint16_t	tot_dsds;
1210 	struct req_que *req = NULL;
1211 	struct rsp_que *rsp = NULL;
1212 	struct scsi_cmnd *cmd = sp->cmd;
1213 	struct scsi_qla_host *vha = sp->fcport->vha;
1214 	struct qla_hw_data *ha = vha->hw;
1215 	char		tag[2];
1216 
1217 	/* Setup device pointers. */
1218 	ret = 0;
1219 
1220 	qla25xx_set_que(sp, &rsp);
1221 	req = vha->req;
1222 
1223 	/* So we know we haven't pci_map'ed anything yet */
1224 	tot_dsds = 0;
1225 
1226 	/* Send marker if required */
1227 	if (vha->marker_needed != 0) {
1228 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
1229 							!= QLA_SUCCESS)
1230 			return QLA_FUNCTION_FAILED;
1231 		vha->marker_needed = 0;
1232 	}
1233 
1234 	/* Acquire ring specific lock */
1235 	spin_lock_irqsave(&ha->hardware_lock, flags);
1236 
1237 	/* Check for room in outstanding command list. */
1238 	handle = req->current_outstanding_cmd;
1239 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1240 		handle++;
1241 		if (handle == MAX_OUTSTANDING_COMMANDS)
1242 			handle = 1;
1243 		if (!req->outstanding_cmds[handle])
1244 			break;
1245 	}
1246 	if (index == MAX_OUTSTANDING_COMMANDS)
1247 		goto queuing_error;
1248 
1249 	/* Map the sg table so we have an accurate count of sg entries needed */
1250 	if (scsi_sg_count(cmd)) {
1251 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1252 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1253 		if (unlikely(!nseg))
1254 			goto queuing_error;
1255 	} else
1256 		nseg = 0;
1257 
1258 	tot_dsds = nseg;
1259 
1260 	req_cnt = qla24xx_calc_iocbs(tot_dsds);
1261 	if (req->cnt < (req_cnt + 2)) {
1262 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1263 
1264 		if (req->ring_index < cnt)
1265 			req->cnt = cnt - req->ring_index;
1266 		else
1267 			req->cnt = req->length -
1268 				(req->ring_index - cnt);
1269 	}
1270 	if (req->cnt < (req_cnt + 2))
1271 		goto queuing_error;
1272 
1273 	/* Build command packet. */
1274 	req->current_outstanding_cmd = handle;
1275 	req->outstanding_cmds[handle] = sp;
1276 	sp->handle = handle;
1277 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1278 	req->cnt -= req_cnt;
1279 
1280 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1281 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1282 
1283 	/* Zero out remaining portion of packet. */
1284 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1285 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1286 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1287 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1288 
1289 	/* Set NPORT-ID and LUN number*/
1290 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1291 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1292 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1293 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1294 	cmd_pkt->vp_index = sp->fcport->vp_idx;
1295 
1296 	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1297 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1298 
1299 	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1300 	if (scsi_populate_tag_msg(cmd, tag)) {
1301 		switch (tag[0]) {
1302 		case HEAD_OF_QUEUE_TAG:
1303 			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1304 			break;
1305 		case ORDERED_QUEUE_TAG:
1306 			cmd_pkt->task = TSK_ORDERED;
1307 			break;
1308 		}
1309 	}
1310 
1311 	/* Load SCSI command packet. */
1312 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1313 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1314 
1315 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1316 
1317 	/* Build IOCB segments */
1318 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1319 
1320 	/* Set total data segment count. */
1321 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1322 	/* Specify response queue number where completion should happen */
1323 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1324 	wmb();
1325 
1326 	/* Adjust ring index. */
1327 	req->ring_index++;
1328 	if (req->ring_index == req->length) {
1329 		req->ring_index = 0;
1330 		req->ring_ptr = req->ring;
1331 	} else
1332 		req->ring_ptr++;
1333 
1334 	sp->flags |= SRB_DMA_VALID;
1335 
1336 	/* Set chip new ring index. */
1337 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1338 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1339 
1340 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1341 	if (vha->flags.process_response_queue &&
1342 		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1343 		qla24xx_process_response_queue(vha, rsp);
1344 
1345 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1346 	return QLA_SUCCESS;
1347 
1348 queuing_error:
1349 	if (tot_dsds)
1350 		scsi_dma_unmap(cmd);
1351 
1352 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1353 
1354 	return QLA_FUNCTION_FAILED;
1355 }
1356 
1357 
1358 /**
1359  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1360  * @sp: command to send to the ISP
1361  *
1362  * Returns non-zero if a failure occurred, else zero.
1363  */
1364 int
1365 qla24xx_dif_start_scsi(srb_t *sp)
1366 {
1367 	int			nseg;
1368 	unsigned long		flags;
1369 	uint32_t		*clr_ptr;
1370 	uint32_t		index;
1371 	uint32_t		handle;
1372 	uint16_t		cnt;
1373 	uint16_t		req_cnt = 0;
1374 	uint16_t		tot_dsds;
1375 	uint16_t		tot_prot_dsds;
1376 	uint16_t		fw_prot_opts = 0;
1377 	struct req_que		*req = NULL;
1378 	struct rsp_que		*rsp = NULL;
1379 	struct scsi_cmnd	*cmd = sp->cmd;
1380 	struct scsi_qla_host	*vha = sp->fcport->vha;
1381 	struct qla_hw_data	*ha = vha->hw;
1382 	struct cmd_type_crc_2	*cmd_pkt;
1383 	uint32_t		status = 0;
1384 
1385 #define QDSS_GOT_Q_SPACE	BIT_0
1386 
1387 	/* Only process protection or >16 cdb in this routine */
1388 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1389 		if (cmd->cmd_len <= 16)
1390 			return qla24xx_start_scsi(sp);
1391 	}
1392 
1393 	/* Setup device pointers. */
1394 
1395 	qla25xx_set_que(sp, &rsp);
1396 	req = vha->req;
1397 
1398 	/* So we know we haven't pci_map'ed anything yet */
1399 	tot_dsds = 0;
1400 
1401 	/* Send marker if required */
1402 	if (vha->marker_needed != 0) {
1403 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1404 		    QLA_SUCCESS)
1405 			return QLA_FUNCTION_FAILED;
1406 		vha->marker_needed = 0;
1407 	}
1408 
1409 	/* Acquire ring specific lock */
1410 	spin_lock_irqsave(&ha->hardware_lock, flags);
1411 
1412 	/* Check for room in outstanding command list. */
1413 	handle = req->current_outstanding_cmd;
1414 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1415 		handle++;
1416 		if (handle == MAX_OUTSTANDING_COMMANDS)
1417 			handle = 1;
1418 		if (!req->outstanding_cmds[handle])
1419 			break;
1420 	}
1421 
1422 	if (index == MAX_OUTSTANDING_COMMANDS)
1423 		goto queuing_error;
1424 
1425 	/* Compute number of required data segments */
1426 	/* Map the sg table so we have an accurate count of sg entries needed */
1427 	if (scsi_sg_count(cmd)) {
1428 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1429 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1430 		if (unlikely(!nseg))
1431 			goto queuing_error;
1432 		else
1433 			sp->flags |= SRB_DMA_VALID;
1434 	} else
1435 		nseg = 0;
1436 
1437 	/* number of required data segments */
1438 	tot_dsds = nseg;
1439 
1440 	/* Compute number of required protection segments */
1441 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1442 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1443 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1444 		if (unlikely(!nseg))
1445 			goto queuing_error;
1446 		else
1447 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1448 	} else {
1449 		nseg = 0;
1450 	}
1451 
1452 	req_cnt = 1;
1453 	/* Total Data and protection sg segment(s) */
1454 	tot_prot_dsds = nseg;
1455 	tot_dsds += nseg;
1456 	if (req->cnt < (req_cnt + 2)) {
1457 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1458 
1459 		if (req->ring_index < cnt)
1460 			req->cnt = cnt - req->ring_index;
1461 		else
1462 			req->cnt = req->length -
1463 				(req->ring_index - cnt);
1464 	}
1465 
1466 	if (req->cnt < (req_cnt + 2))
1467 		goto queuing_error;
1468 
1469 	status |= QDSS_GOT_Q_SPACE;
1470 
1471 	/* Build header part of command packet (excluding the OPCODE). */
1472 	req->current_outstanding_cmd = handle;
1473 	req->outstanding_cmds[handle] = sp;
1474 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1475 	req->cnt -= req_cnt;
1476 
1477 	/* Fill-in common area */
1478 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1479 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1480 
1481 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1482 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1483 
1484 	/* Set NPORT-ID and LUN number*/
1485 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1486 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1487 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1488 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1489 
1490 	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1491 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1492 
1493 	/* Total Data and protection segment(s) */
1494 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1495 
1496 	/* Build IOCB segments and adjust for data protection segments */
1497 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1498 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1499 		QLA_SUCCESS)
1500 		goto queuing_error;
1501 
1502 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1503 	/* Specify response queue number where completion should happen */
1504 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1505 	cmd_pkt->timeout = __constant_cpu_to_le16(0);
1506 	wmb();
1507 
1508 	/* Adjust ring index. */
1509 	req->ring_index++;
1510 	if (req->ring_index == req->length) {
1511 		req->ring_index = 0;
1512 		req->ring_ptr = req->ring;
1513 	} else
1514 		req->ring_ptr++;
1515 
1516 	/* Set chip new ring index. */
1517 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1518 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1519 
1520 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1521 	if (vha->flags.process_response_queue &&
1522 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1523 		qla24xx_process_response_queue(vha, rsp);
1524 
1525 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1526 
1527 	return QLA_SUCCESS;
1528 
1529 queuing_error:
1530 	if (status & QDSS_GOT_Q_SPACE) {
1531 		req->outstanding_cmds[handle] = NULL;
1532 		req->cnt += req_cnt;
1533 	}
1534 	/* Cleanup will be performed by the caller (queuecommand) */
1535 
1536 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537 
1538 	DEBUG18(qla_printk(KERN_INFO, ha,
1539 	    "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1540 	return QLA_FUNCTION_FAILED;
1541 }
1542 
1543 
1544 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1545 {
1546 	struct scsi_cmnd *cmd = sp->cmd;
1547 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1548 	int affinity = cmd->request->cpu;
1549 
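	/*
	 * With CPU affinity enabled, steer completions to the response
	 * queue paired with the submitting CPU; otherwise (or for an
	 * unknown CPU) fall back to the default queue 0.
	 */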
1550 	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1551 		affinity < ha->max_rsp_queues - 1)
1552 		*rsp = ha->rsp_q_map[affinity + 1];
1553 	 else
1554 		*rsp = ha->rsp_q_map[0];
1555 }
1556 
1557 /* Generic Control-SRB manipulation functions. */
1558 void *
1559 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1560 {
1561 	struct qla_hw_data *ha = vha->hw;
1562 	struct req_que *req = ha->req_q_map[0];
1563 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1564 	uint32_t index, handle;
1565 	request_t *pkt;
1566 	uint16_t cnt, req_cnt;
1567 
1568 	pkt = NULL;
1569 	req_cnt = 1;
1570 	handle = 0;
1571 
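	/*
	 * Pure control IOCBs (e.g. markers) are queued with a NULL sp, so
	 * skip the outstanding-command bookkeeping for them.
	 */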
1572 	if (!sp)
1573 		goto skip_cmd_array;
1574 
1575 	/* Check for room in outstanding command list. */
1576 	handle = req->current_outstanding_cmd;
1577 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1578 		handle++;
1579 		if (handle == MAX_OUTSTANDING_COMMANDS)
1580 			handle = 1;
1581 		if (!req->outstanding_cmds[handle])
1582 			break;
1583 	}
1584 	if (index == MAX_OUTSTANDING_COMMANDS)
1585 		goto queuing_error;
1586 
1587 	/* Prep command array. */
1588 	req->current_outstanding_cmd = handle;
1589 	req->outstanding_cmds[handle] = sp;
1590 	sp->handle = handle;
1591 
1592 skip_cmd_array:
1593 	/* Check for room on request queue. */
1594 	if (req->cnt < req_cnt) {
1595 		if (ha->mqenable)
1596 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1597 		else if (IS_QLA82XX(ha))
1598 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1599 		else if (IS_FWI2_CAPABLE(ha))
1600 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1601 		else
1602 			cnt = qla2x00_debounce_register(
1603 			    ISP_REQ_Q_OUT(ha, &reg->isp));
1604 
1605 		if  (req->ring_index < cnt)
1606 			req->cnt = cnt - req->ring_index;
1607 		else
1608 			req->cnt = req->length -
1609 			    (req->ring_index - cnt);
1610 	}
1611 	if (req->cnt < req_cnt)
1612 		goto queuing_error;
1613 
1614 	/* Prep packet */
1615 	req->cnt -= req_cnt;
1616 	pkt = req->ring_ptr;
1617 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
1618 	pkt->entry_count = req_cnt;
1619 	pkt->handle = handle;
1620 
1621 queuing_error:
1622 	return pkt;
1623 }
1624 
1625 static void
1626 qla2x00_start_iocbs(srb_t *sp)
1627 {
1628 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1629 	struct req_que *req = ha->req_q_map[0];
1630 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1631 	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1632 
1633 	if (IS_QLA82XX(ha)) {
1634 		qla82xx_start_iocbs(sp);
1635 	} else {
1636 		/* Adjust ring index. */
1637 		req->ring_index++;
1638 		if (req->ring_index == req->length) {
1639 			req->ring_index = 0;
1640 			req->ring_ptr = req->ring;
1641 		} else
1642 			req->ring_ptr++;
1643 
1644 		/* Set chip new ring index. */
1645 		if (ha->mqenable) {
1646 			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1647 			RD_REG_DWORD(&ioreg->hccr);
1648 		} else if (IS_QLA82XX(ha)) {
1649 			qla82xx_start_iocbs(sp);
1650 		} else if (IS_FWI2_CAPABLE(ha)) {
1651 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1652 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1653 		} else {
1654 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1655 				req->ring_index);
1656 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1657 		}
1658 	}
1659 }
1660 
1661 static void
1662 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1663 {
1664 	struct srb_ctx *ctx = sp->ctx;
1665 	struct srb_iocb *lio = ctx->u.iocb_cmd;
1666 
1667 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1668 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1669 	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1670 		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1671 	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1672 		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1673 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1674 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1675 	logio->port_id[1] = sp->fcport->d_id.b.area;
1676 	logio->port_id[2] = sp->fcport->d_id.b.domain;
1677 	logio->vp_index = sp->fcport->vp_idx;
1678 }
1679 
1680 static void
1681 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1682 {
1683 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1684 	struct srb_ctx *ctx = sp->ctx;
1685 	struct srb_iocb *lio = ctx->u.iocb_cmd;
1686 	uint16_t opts;
1687 
1688 	mbx->entry_type = MBX_IOCB_TYPE;
1689 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1690 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1691 	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1692 	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1693 	if (HAS_EXTENDED_IDS(ha)) {
1694 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1695 		mbx->mb10 = cpu_to_le16(opts);
1696 	} else {
1697 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1698 	}
1699 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1700 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1701 	    sp->fcport->d_id.b.al_pa);
1702 	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1703 }
1704 
1705 static void
1706 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1707 {
1708 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1709 	logio->control_flags =
1710 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1711 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1712 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1713 	logio->port_id[1] = sp->fcport->d_id.b.area;
1714 	logio->port_id[2] = sp->fcport->d_id.b.domain;
1715 	logio->vp_index = sp->fcport->vp_idx;
1716 }
1717 
1718 static void
1719 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1720 {
1721 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1722 
1723 	mbx->entry_type = MBX_IOCB_TYPE;
1724 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1725 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1726 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1727 	    cpu_to_le16(sp->fcport->loop_id):
1728 	    cpu_to_le16(sp->fcport->loop_id << 8);
1729 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1730 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1731 	    sp->fcport->d_id.b.al_pa);
1732 	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1733 	/* Implicit: mbx->mbx10 = 0. */
1734 }
1735 
1736 static void
1737 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1738 {
1739 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1740 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1741 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1742 	logio->vp_index = sp->fcport->vp_idx;
1743 }
1744 
1745 static void
1746 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1747 {
1748 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1749 
1750 	mbx->entry_type = MBX_IOCB_TYPE;
1751 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1752 	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1753 	if (HAS_EXTENDED_IDS(ha)) {
1754 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1755 		mbx->mb10 = cpu_to_le16(BIT_0);
1756 	} else {
1757 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1758 	}
1759 	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1760 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1761 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1762 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1763 	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1764 }
1765 
1766 static void
1767 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1768 {
1769 	uint32_t flags;
1770 	unsigned int lun;
1771 	struct fc_port *fcport = sp->fcport;
1772 	scsi_qla_host_t *vha = fcport->vha;
1773 	struct qla_hw_data *ha = vha->hw;
1774 	struct srb_ctx *ctx = sp->ctx;
1775 	struct srb_iocb *iocb = ctx->u.iocb_cmd;
1776 	struct req_que *req = vha->req;
1777 
1778 	flags = iocb->u.tmf.flags;
1779 	lun = iocb->u.tmf.lun;
1780 
1781 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1782 	tsk->entry_count = 1;
1783 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1784 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1785 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1786 	tsk->control_flags = cpu_to_le32(flags);
1787 	tsk->port_id[0] = fcport->d_id.b.al_pa;
1788 	tsk->port_id[1] = fcport->d_id.b.area;
1789 	tsk->port_id[2] = fcport->d_id.b.domain;
1790 	tsk->vp_index = fcport->vp_idx;
1791 
1792 	if (flags == TCF_LUN_RESET) {
1793 		int_to_scsilun(lun, &tsk->lun);
1794 		host_to_fcp_swap((uint8_t *)&tsk->lun,
1795 			sizeof(tsk->lun));
1796 	}
1797 }
1798 
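/**
 * qla24xx_els_iocb() - Prepare an ELS Pass-Through IOCB from a BSG request.
 * @sp: SRB command to process
 * @els_iocb: ELS IOCB to populate
 *
 * Transmit and receive payloads each use a single data segment taken from
 * the BSG request/reply scatter-gather lists.
 */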
1799 static void
1800 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1801 {
1802 	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1803 
1804 	els_iocb->entry_type = ELS_IOCB_TYPE;
1805 	els_iocb->entry_count = 1;
1806 	els_iocb->sys_define = 0;
1807 	els_iocb->entry_status = 0;
1808 	els_iocb->handle = sp->handle;
1809 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1810 	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1811 	els_iocb->vp_index = sp->fcport->vp_idx;
1812 	els_iocb->sof_type = EST_SOFI3;
1813 	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1814 
1815 	els_iocb->opcode =
1816 	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1817 	    bsg_job->request->rqst_data.r_els.els_code :
1818 	    bsg_job->request->rqst_data.h_els.command_code;
1819 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1820 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1821 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1822 	els_iocb->control_flags = 0;
1823 	els_iocb->rx_byte_count =
1824 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
1825 	els_iocb->tx_byte_count =
1826 	    cpu_to_le32(bsg_job->request_payload.payload_len);
1827 
1828 	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1829 	    (bsg_job->request_payload.sg_list)));
1830 	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1831 	    (bsg_job->request_payload.sg_list)));
1832 	els_iocb->tx_len = cpu_to_le32(sg_dma_len
1833 	    (bsg_job->request_payload.sg_list));
1834 
1835 	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1836 	    (bsg_job->reply_payload.sg_list)));
1837 	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1838 	    (bsg_job->reply_payload.sg_list)));
1839 	els_iocb->rx_len = cpu_to_le32(sg_dma_len
1840 	    (bsg_job->reply_payload.sg_list));
1841 }
1842 
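/**
 * qla2x00_ct_iocb() - Prepare a CT Pass-Through (MS) IOCB for pre-ISP24xx
 * adapters from a BSG request.
 * @sp: SRB command to process
 * @ct_iocb: MS IOCB to populate
 *
 * Reply-payload segments beyond the first are placed in Continuation
 * Type 1 IOCBs.
 */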
1843 static void
1844 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
1845 {
1846 	uint16_t        avail_dsds;
1847 	uint32_t        *cur_dsd;
1848 	struct scatterlist *sg;
1849 	int index;
1850 	uint16_t tot_dsds;
1851 	scsi_qla_host_t *vha = sp->fcport->vha;
1852 	struct qla_hw_data *ha = vha->hw;
1853 	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1854 	int loop_iteration = 0;
1855 	int cont_iocb_prsnt = 0;
1856 	int entry_count = 1;
1857 
1858 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
1859 	ct_iocb->entry_type = CT_IOCB_TYPE;
1860 	ct_iocb->entry_status = 0;
1861 	ct_iocb->handle1 = sp->handle;
1862 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
1863 	ct_iocb->status = __constant_cpu_to_le16(0);
1864 	ct_iocb->control_flags = __constant_cpu_to_le16(0);
1865 	ct_iocb->timeout = 0;
1866 	ct_iocb->cmd_dsd_count =
1867 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1868 	ct_iocb->total_dsd_count =
1869 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
1870 	ct_iocb->req_bytecount =
1871 	    cpu_to_le32(bsg_job->request_payload.payload_len);
1872 	ct_iocb->rsp_bytecount =
1873 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
1874 
1875 	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
1876 	    (bsg_job->request_payload.sg_list)));
1877 	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
1878 	    (bsg_job->request_payload.sg_list)));
1879 	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
1880 
1881 	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
1882 	    (bsg_job->reply_payload.sg_list)));
1883 	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
1884 	    (bsg_job->reply_payload.sg_list)));
1885 	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
1886 
1887 	avail_dsds = 1;
1888 	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
1889 	index = 0;
1890 	tot_dsds = bsg_job->reply_payload.sg_cnt;
1891 
1892 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1893 		dma_addr_t       sle_dma;
1894 		cont_a64_entry_t *cont_pkt;
1895 
1896 		/* Allocate additional continuation packets? */
1897 		if (avail_dsds == 0) {
1898 			/*
1899 			 * Five DSDs are available in the Cont.
1900 			 * Type 1 IOCB.
1901 			 */
1902 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1903 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1904 			avail_dsds = 5;
1905 			cont_iocb_prsnt = 1;
1906 			entry_count++;
1907 		}
1908 
1909 		sle_dma = sg_dma_address(sg);
1910 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1911 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1912 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1913 		loop_iteration++;
1914 		avail_dsds--;
1915 	}
1916 	ct_iocb->entry_count = entry_count;
1917 }
1918 
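/**
 * qla24xx_ct_iocb() - Prepare an ISP24xx CT Pass-Through IOCB from a BSG
 * request.
 * @sp: SRB command to process
 * @ct_iocb: CT IOCB to populate
 *
 * Reply-payload segments beyond the first are placed in Continuation
 * Type 1 IOCBs.
 */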
1919 static void
1920 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1921 {
1922 	uint16_t        avail_dsds;
1923 	uint32_t        *cur_dsd;
1924 	struct scatterlist *sg;
1925 	int index;
1926 	uint16_t tot_dsds;
1927 	scsi_qla_host_t *vha = sp->fcport->vha;
1928 	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1929 	int loop_iteration = 0;
1930 	int cont_iocb_prsnt = 0;
1931 	int entry_count = 1;
1932 
1933 	ct_iocb->entry_type = CT_IOCB_TYPE;
1934 	ct_iocb->entry_status = 0;
1935 	ct_iocb->sys_define = 0;
1936 	ct_iocb->handle = sp->handle;
1937 
1938 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1939 	ct_iocb->vp_index = sp->fcport->vp_idx;
1940 	ct_iocb->comp_status = __constant_cpu_to_le16(0);
1941 
1942 	ct_iocb->cmd_dsd_count =
1943 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1944 	ct_iocb->timeout = 0;
1945 	ct_iocb->rsp_dsd_count =
1946 	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1947 	ct_iocb->rsp_byte_count =
1948 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
1949 	ct_iocb->cmd_byte_count =
1950 	    cpu_to_le32(bsg_job->request_payload.payload_len);
1951 	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1952 	    (bsg_job->request_payload.sg_list)));
1953 	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1954 	    (bsg_job->request_payload.sg_list)));
1955 	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1956 	    (bsg_job->request_payload.sg_list));
1957 
1958 	avail_dsds = 1;
1959 	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1960 	index = 0;
1961 	tot_dsds = bsg_job->reply_payload.sg_cnt;
1962 
1963 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1964 		dma_addr_t       sle_dma;
1965 		cont_a64_entry_t *cont_pkt;
1966 
1967 		/* Allocate additional continuation packets? */
1968 		if (avail_dsds == 0) {
1969 			/*
1970 			 * Five DSDs are available in the Cont.
1971 			 * Type 1 IOCB.
1972 			 */
1973 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1974 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1975 			avail_dsds = 5;
1976 			cont_iocb_prsnt = 1;
1977 			entry_count++;
1978 		}
1979 
1980 		sle_dma = sg_dma_address(sg);
1981 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1982 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1983 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1984 		loop_iteration++;
1985 		avail_dsds--;
1986 	}
1987 	ct_iocb->entry_count = entry_count;
1988 }
1989 
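/**
 * qla2x00_start_sp() - Build and issue the IOCB for an SRB context command.
 * @sp: SRB command to process
 *
 * Allocates request-queue ring space, builds the IOCB appropriate to the
 * SRB type and ISP generation, and starts it on the request queue.
 *
 * Returns QLA_SUCCESS on success, or QLA_FUNCTION_FAILED if no IOCB space
 * is available.
 */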
1990 int
1991 qla2x00_start_sp(srb_t *sp)
1992 {
1993 	int rval;
1994 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1995 	void *pkt;
1996 	struct srb_ctx *ctx = sp->ctx;
1997 	unsigned long flags;
1998 
1999 	rval = QLA_FUNCTION_FAILED;
2000 	spin_lock_irqsave(&ha->hardware_lock, flags);
2001 	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2002 	if (!pkt)
2003 		goto done;
2004 
2005 	rval = QLA_SUCCESS;
2006 	switch (ctx->type) {
2007 	case SRB_LOGIN_CMD:
2008 		IS_FWI2_CAPABLE(ha) ?
2009 		    qla24xx_login_iocb(sp, pkt) :
2010 		    qla2x00_login_iocb(sp, pkt);
2011 		break;
2012 	case SRB_LOGOUT_CMD:
2013 		IS_FWI2_CAPABLE(ha) ?
2014 		    qla24xx_logout_iocb(sp, pkt) :
2015 		    qla2x00_logout_iocb(sp, pkt);
2016 		break;
2017 	case SRB_ELS_CMD_RPT:
2018 	case SRB_ELS_CMD_HST:
2019 		qla24xx_els_iocb(sp, pkt);
2020 		break;
2021 	case SRB_CT_CMD:
2022 		IS_FWI2_CAPABLE(ha) ?
2023 		    qla24xx_ct_iocb(sp, pkt) :
2024 		    qla2x00_ct_iocb(sp, pkt);
2025 		break;
2026 	case SRB_ADISC_CMD:
2027 		IS_FWI2_CAPABLE(ha) ?
2028 		    qla24xx_adisc_iocb(sp, pkt) :
2029 		    qla2x00_adisc_iocb(sp, pkt);
2030 		break;
2031 	case SRB_TM_CMD:
2032 		qla24xx_tm_iocb(sp, pkt);
2033 		break;
2034 	default:
2035 		break;
2036 	}
2037 
2038 	wmb();
2039 	qla2x00_start_iocbs(sp);
2040 done:
2041 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2042 	return rval;
2043 }
2044