1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11
12 #include <scsi/scsi_tcq.h>
13
14 static void qla25xx_set_que(srb_t *, struct rsp_que **);
15 /**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @sp: SRB command to process
18 *
19 * Returns the proper CF_* direction based on CDB.
20 */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26
27 cflags = 0;
28
29 /* Set transfer direction */
30 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
31 cflags = CF_WRITE;
32 sp->fcport->vha->hw->qla_stats.output_bytes +=
33 scsi_bufflen(cmd);
34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
35 cflags = CF_READ;
36 sp->fcport->vha->hw->qla_stats.input_bytes +=
37 scsi_bufflen(cmd);
38 }
39 return (cflags);
40 }
41
42 /**
43 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44 * Continuation Type 0 IOCBs to allocate.
45 *
46 * @dsds: number of data segment descriptors needed
47 *
48 * Returns the number of IOCB entries needed to store @dsds.
49 */
50 uint16_t
51 qla2x00_calc_iocbs_32(uint16_t dsds)
52 {
53 uint16_t iocbs;
54
55 iocbs = 1;
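/*
 * A Command Type 2 IOCB holds up to 3 DSDs and each Continuation
 * Type 0 IOCB holds up to 7 more; e.g. 10 DSDs fit in one command
 * entry plus one continuation entry (iocbs == 2).
 */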
56 if (dsds > 3) {
57 iocbs += (dsds - 3) / 7;
58 if ((dsds - 3) % 7)
59 iocbs++;
60 }
61 return (iocbs);
62 }
63
64 /**
65 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66 * Continuation Type 1 IOCBs to allocate.
67 *
68 * @dsds: number of data segment descriptors needed
69 *
70 * Returns the number of IOCB entries needed to store @dsds.
71 */
72 uint16_t
73 qla2x00_calc_iocbs_64(uint16_t dsds)
74 {
75 uint16_t iocbs;
76
77 iocbs = 1;
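/*
 * A Command Type 3 IOCB holds up to 2 DSDs and each Continuation
 * Type 1 IOCB holds up to 5 more; e.g. 12 DSDs need one command
 * entry plus two continuation entries (iocbs == 3).
 */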
78 if (dsds > 2) {
79 iocbs += (dsds - 2) / 5;
80 if ((dsds - 2) % 5)
81 iocbs++;
82 }
83 return (iocbs);
84 }
85
86 /**
87 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
88 * @vha: HA context
89 *
90 * Returns a pointer to the Continuation Type 0 IOCB packet.
91 */
92 static inline cont_entry_t *
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
94 {
95 cont_entry_t *cont_pkt;
96 struct req_que *req = vha->req;
97 /* Adjust ring index. */
98 req->ring_index++;
99 if (req->ring_index == req->length) {
100 req->ring_index = 0;
101 req->ring_ptr = req->ring;
102 } else {
103 req->ring_ptr++;
104 }
105
106 cont_pkt = (cont_entry_t *)req->ring_ptr;
107
108 /* Load packet defaults. */
109 *((uint32_t *)(&cont_pkt->entry_type)) =
110 __constant_cpu_to_le32(CONTINUE_TYPE);
111
112 return (cont_pkt);
113 }
114
115 /**
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117 * @vha: HA context
118 *
119 * Returns a pointer to the continuation type 1 IOCB packet.
120 */
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124 cont_a64_entry_t *cont_pkt;
125
126 /* Adjust ring index. */
127 req->ring_index++;
128 if (req->ring_index == req->length) {
129 req->ring_index = 0;
130 req->ring_ptr = req->ring;
131 } else {
132 req->ring_ptr++;
133 }
134
135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136
137 /* Load packet defaults. */
138 *((uint32_t *)(&cont_pkt->entry_type)) =
139 __constant_cpu_to_le32(CONTINUE_A64_TYPE);
140
141 return (cont_pkt);
142 }
143
144 static inline int
145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
146 {
147 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 uint8_t guard = scsi_host_get_guard(cmd->device->host);
149
150 /* We only support T10 DIF right now */
151 if (guard != SHOST_DIX_GUARD_CRC) {
152 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
153 "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
154 return 0;
155 }
156
157 /* We always use DIF bundling for best performance */
158 *fw_prot_opts = 0;
159
160 /* Translate SCSI opcode to a protection opcode */
161 switch (scsi_get_prot_op(cmd)) {
162 case SCSI_PROT_READ_STRIP:
163 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
164 break;
165 case SCSI_PROT_WRITE_INSERT:
166 *fw_prot_opts |= PO_MODE_DIF_INSERT;
167 break;
168 case SCSI_PROT_READ_INSERT:
169 *fw_prot_opts |= PO_MODE_DIF_INSERT;
170 break;
171 case SCSI_PROT_WRITE_STRIP:
172 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
173 break;
174 case SCSI_PROT_READ_PASS:
175 *fw_prot_opts |= PO_MODE_DIF_PASS;
176 break;
177 case SCSI_PROT_WRITE_PASS:
178 *fw_prot_opts |= PO_MODE_DIF_PASS;
179 break;
180 default: /* Normal Request */
181 *fw_prot_opts |= PO_MODE_DIF_PASS;
182 break;
183 }
184
185 return scsi_prot_sg_count(cmd);
186 }
187
188 /**
189 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
190 * capable IOCB types.
191 *
192 * @sp: SRB command to process
193 * @cmd_pkt: Command type 2 IOCB
194 * @tot_dsds: Total number of segments to transfer
195 */
196 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
197 uint16_t tot_dsds)
198 {
199 uint16_t avail_dsds;
200 uint32_t *cur_dsd;
201 scsi_qla_host_t *vha;
202 struct scsi_cmnd *cmd;
203 struct scatterlist *sg;
204 int i;
205
206 cmd = GET_CMD_SP(sp);
207
208 /* Update entry type to indicate Command Type 2 IOCB */
209 *((uint32_t *)(&cmd_pkt->entry_type)) =
210 __constant_cpu_to_le32(COMMAND_TYPE);
211
212 /* No data transfer */
213 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
214 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
215 return;
216 }
217
218 vha = sp->fcport->vha;
219 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
220
221 /* Three DSDs are available in the Command Type 2 IOCB */
222 avail_dsds = 3;
223 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
224
225 /* Load data segments */
226 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
227 cont_entry_t *cont_pkt;
228
229 /* Allocate additional continuation packets? */
230 if (avail_dsds == 0) {
231 /*
232 * Seven DSDs are available in the Continuation
233 * Type 0 IOCB.
234 */
235 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
236 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
237 avail_dsds = 7;
238 }
239
240 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
241 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
242 avail_dsds--;
243 }
244 }
245
246 /**
247 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
248 * capable IOCB types.
249 *
250 * @sp: SRB command to process
251 * @cmd_pkt: Command type 3 IOCB
252 * @tot_dsds: Total number of segments to transfer
253 */
254 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
255 uint16_t tot_dsds)
256 {
257 uint16_t avail_dsds;
258 uint32_t *cur_dsd;
259 scsi_qla_host_t *vha;
260 struct scsi_cmnd *cmd;
261 struct scatterlist *sg;
262 int i;
263
264 cmd = GET_CMD_SP(sp);
265
266 /* Update entry type to indicate Command Type 3 IOCB */
267 *((uint32_t *)(&cmd_pkt->entry_type)) =
268 __constant_cpu_to_le32(COMMAND_A64_TYPE);
269
270 /* No data transfer */
271 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
272 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
273 return;
274 }
275
276 vha = sp->fcport->vha;
277 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
278
279 /* Two DSDs are available in the Command Type 3 IOCB */
280 avail_dsds = 2;
281 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
282
283 /* Load data segments */
284 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
285 dma_addr_t sle_dma;
286 cont_a64_entry_t *cont_pkt;
287
288 /* Allocate additional continuation packets? */
289 if (avail_dsds == 0) {
290 /*
291 * Five DSDs are available in the Continuation
292 * Type 1 IOCB.
293 */
294 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
295 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
296 avail_dsds = 5;
297 }
298
299 sle_dma = sg_dma_address(sg);
300 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
301 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
302 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
303 avail_dsds--;
304 }
305 }
306
307 /**
308 * qla2x00_start_scsi() - Send a SCSI command to the ISP
309 * @sp: command to send to the ISP
310 *
311 * Returns non-zero if a failure occurred, else zero.
312 */
313 int
314 qla2x00_start_scsi(srb_t *sp)
315 {
316 int ret, nseg;
317 unsigned long flags;
318 scsi_qla_host_t *vha;
319 struct scsi_cmnd *cmd;
320 uint32_t *clr_ptr;
321 uint32_t index;
322 uint32_t handle;
323 cmd_entry_t *cmd_pkt;
324 uint16_t cnt;
325 uint16_t req_cnt;
326 uint16_t tot_dsds;
327 struct device_reg_2xxx __iomem *reg;
328 struct qla_hw_data *ha;
329 struct req_que *req;
330 struct rsp_que *rsp;
331 char tag[2];
332
333 /* Setup device pointers. */
334 ret = 0;
335 vha = sp->fcport->vha;
336 ha = vha->hw;
337 reg = &ha->iobase->isp;
338 cmd = GET_CMD_SP(sp);
339 req = ha->req_q_map[0];
340 rsp = ha->rsp_q_map[0];
341 /* So we know we haven't pci_map'ed anything yet */
342 tot_dsds = 0;
343
344 /* Send marker if required */
345 if (vha->marker_needed != 0) {
346 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
347 QLA_SUCCESS) {
348 return (QLA_FUNCTION_FAILED);
349 }
350 vha->marker_needed = 0;
351 }
352
353 /* Acquire ring specific lock */
354 spin_lock_irqsave(&ha->hardware_lock, flags);
355
356 /* Check for room in outstanding command list. */
357 handle = req->current_outstanding_cmd;
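/*
 * Handle 0 is never used, so the scan runs over slots 1 through
 * MAX_OUTSTANDING_COMMANDS - 1, starting just past the handle that
 * was handed out most recently.
 */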
358 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
359 handle++;
360 if (handle == MAX_OUTSTANDING_COMMANDS)
361 handle = 1;
362 if (!req->outstanding_cmds[handle])
363 break;
364 }
365 if (index == MAX_OUTSTANDING_COMMANDS)
366 goto queuing_error;
367
368 /* Map the sg table so we have an accurate count of sg entries needed */
369 if (scsi_sg_count(cmd)) {
370 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
371 scsi_sg_count(cmd), cmd->sc_data_direction);
372 if (unlikely(!nseg))
373 goto queuing_error;
374 } else
375 nseg = 0;
376
377 tot_dsds = nseg;
378
379 /* Calculate the number of request entries needed. */
380 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
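/*
 * Refresh the cached free-entry count from the firmware's out-pointer
 * only when it looks insufficient; requiring req_cnt + 2 free entries
 * keeps a small reserve so the request ring is never driven
 * completely full.
 */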
381 if (req->cnt < (req_cnt + 2)) {
382 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
383 if (req->ring_index < cnt)
384 req->cnt = cnt - req->ring_index;
385 else
386 req->cnt = req->length -
387 (req->ring_index - cnt);
388 }
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
391
392 /* Build command packet */
393 req->current_outstanding_cmd = handle;
394 req->outstanding_cmds[handle] = sp;
395 sp->handle = handle;
396 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
397 req->cnt -= req_cnt;
398
399 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
400 cmd_pkt->handle = handle;
401 /* Zero out remaining portion of packet. */
402 clr_ptr = (uint32_t *)cmd_pkt + 2;
403 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
404 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
405
406 /* Set target ID and LUN number*/
407 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
408 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
409
410 /* Update tagged queuing modifier */
411 if (scsi_populate_tag_msg(cmd, tag)) {
412 switch (tag[0]) {
413 case HEAD_OF_QUEUE_TAG:
414 cmd_pkt->control_flags =
415 __constant_cpu_to_le16(CF_HEAD_TAG);
416 break;
417 case ORDERED_QUEUE_TAG:
418 cmd_pkt->control_flags =
419 __constant_cpu_to_le16(CF_ORDERED_TAG);
420 break;
421 default:
422 cmd_pkt->control_flags =
423 __constant_cpu_to_le16(CF_SIMPLE_TAG);
424 break;
425 }
426 } else {
427 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
428 }
429
430 /* Load SCSI command packet. */
431 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
432 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
433
434 /* Build IOCB segments */
435 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
436
437 /* Set total data segment count. */
438 cmd_pkt->entry_count = (uint8_t)req_cnt;
439 wmb();
440
441 /* Adjust ring index. */
442 req->ring_index++;
443 if (req->ring_index == req->length) {
444 req->ring_index = 0;
445 req->ring_ptr = req->ring;
446 } else
447 req->ring_ptr++;
448
449 sp->flags |= SRB_DMA_VALID;
450
451 /* Set chip new ring index. */
452 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
453 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
454
455 /* Manage unprocessed RIO/ZIO commands in response queue. */
456 if (vha->flags.process_response_queue &&
457 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
458 qla2x00_process_response_queue(rsp);
459
460 spin_unlock_irqrestore(&ha->hardware_lock, flags);
461 return (QLA_SUCCESS);
462
463 queuing_error:
464 if (tot_dsds)
465 scsi_dma_unmap(cmd);
466
467 spin_unlock_irqrestore(&ha->hardware_lock, flags);
468
469 return (QLA_FUNCTION_FAILED);
470 }
471
472 /**
473 * qla2x00_start_iocbs() - Execute the IOCB command
474 */
475 static void
476 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
477 {
478 struct qla_hw_data *ha = vha->hw;
479 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
480
481 if (IS_QLA82XX(ha)) {
482 qla82xx_start_iocbs(vha);
483 } else {
484 /* Adjust ring index. */
485 req->ring_index++;
486 if (req->ring_index == req->length) {
487 req->ring_index = 0;
488 req->ring_ptr = req->ring;
489 } else
490 req->ring_ptr++;
491
492 /* Set chip new ring index. */
493 if (ha->mqenable || IS_QLA83XX(ha)) {
494 WRT_REG_DWORD(req->req_q_in, req->ring_index);
495 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
496 } else if (IS_FWI2_CAPABLE(ha)) {
497 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
498 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
499 } else {
500 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
501 req->ring_index);
502 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
503 }
504 }
505 }
506
507 /**
508 * qla2x00_marker() - Send a marker IOCB to the firmware.
509 * @vha: HA context
510 * @loop_id: loop ID
511 * @lun: LUN
512 * @type: marker modifier
513 *
514 * Can be called from both normal and interrupt context.
515 *
516 * Returns non-zero if a failure occurred, else zero.
517 */
518 static int
519 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
520 struct rsp_que *rsp, uint16_t loop_id,
521 uint16_t lun, uint8_t type)
522 {
523 mrk_entry_t *mrk;
524 struct mrk_entry_24xx *mrk24;
525 struct qla_hw_data *ha = vha->hw;
526 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
527
528 mrk24 = NULL;
529 req = ha->req_q_map[0];
530 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
531 if (mrk == NULL) {
532 ql_log(ql_log_warn, base_vha, 0x3026,
533 "Failed to allocate Marker IOCB.\n");
534
535 return (QLA_FUNCTION_FAILED);
536 }
537
538 mrk->entry_type = MARKER_TYPE;
539 mrk->modifier = type;
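/*
 * MK_SYNC_ALL synchronizes all targets and LUNs, so per-port and
 * per-LUN addressing is only filled in for the narrower marker
 * modifiers.
 */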
540 if (type != MK_SYNC_ALL) {
541 if (IS_FWI2_CAPABLE(ha)) {
542 mrk24 = (struct mrk_entry_24xx *) mrk;
543 mrk24->nport_handle = cpu_to_le16(loop_id);
544 mrk24->lun[1] = LSB(lun);
545 mrk24->lun[2] = MSB(lun);
546 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
547 mrk24->vp_index = vha->vp_idx;
548 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
549 } else {
550 SET_TARGET_ID(ha, mrk->target, loop_id);
551 mrk->lun = cpu_to_le16(lun);
552 }
553 }
554 wmb();
555
556 qla2x00_start_iocbs(vha, req);
557
558 return (QLA_SUCCESS);
559 }
560
561 int
562 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
563 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
564 uint8_t type)
565 {
566 int ret;
567 unsigned long flags = 0;
568
569 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
570 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
571 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
572
573 return (ret);
574 }
575
576 /**
577 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
578 * Continuation Type 1 IOCBs to allocate.
579 *
580 * @dsds: number of data segment descriptors needed
581 *
582 * Returns the number of IOCB entries needed to store @dsds.
583 */
584 inline uint16_t
585 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
586 {
587 uint16_t iocbs;
588
589 iocbs = 1;
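/*
 * A Command Type 7 IOCB carries a single DSD and each Continuation
 * Type 1 IOCB carries up to 5 more; e.g. 6 DSDs need one command
 * entry plus one continuation entry (iocbs == 2).
 */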
590 if (dsds > 1) {
591 iocbs += (dsds - 1) / 5;
592 if ((dsds - 1) % 5)
593 iocbs++;
594 }
595 return iocbs;
596 }
597
598 static inline int
599 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
600 uint16_t tot_dsds)
601 {
602 uint32_t *cur_dsd = NULL;
603 scsi_qla_host_t *vha;
604 struct qla_hw_data *ha;
605 struct scsi_cmnd *cmd;
606 struct scatterlist *cur_seg;
607 uint32_t *dsd_seg;
608 void *next_dsd;
609 uint8_t avail_dsds;
610 uint8_t first_iocb = 1;
611 uint32_t dsd_list_len;
612 struct dsd_dma *dsd_ptr;
613 struct ct6_dsd *ctx;
614
615 cmd = GET_CMD_SP(sp);
616
617 /* Update entry type to indicate Command Type 6 IOCB */
618 *((uint32_t *)(&cmd_pkt->entry_type)) =
619 __constant_cpu_to_le32(COMMAND_TYPE_6);
620
621 /* No data transfer */
622 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
623 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
624 return 0;
625 }
626
627 vha = sp->fcport->vha;
628 ha = vha->hw;
629
630 /* Set transfer direction */
631 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
632 cmd_pkt->control_flags =
633 __constant_cpu_to_le16(CF_WRITE_DATA);
634 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
635 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
636 cmd_pkt->control_flags =
637 __constant_cpu_to_le16(CF_READ_DATA);
638 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
639 }
640
641 cur_seg = scsi_sglist(cmd);
642 ctx = GET_CMD_CTX_SP(sp);
643
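/*
 * Command Type 6 references its data segments indirectly: each pass
 * through this loop chains in one pre-allocated DSD list holding up
 * to QLA_DSDS_PER_IOCB data descriptors plus one extra slot (the
 * "+ 1" below) that points at the next list or carries the null
 * termination.
 */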
644 while (tot_dsds) {
645 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
646 QLA_DSDS_PER_IOCB : tot_dsds;
647 tot_dsds -= avail_dsds;
648 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
649
650 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
651 struct dsd_dma, list);
652 next_dsd = dsd_ptr->dsd_addr;
653 list_del(&dsd_ptr->list);
654 ha->gbl_dsd_avail--;
655 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
656 ctx->dsd_use_cnt++;
657 ha->gbl_dsd_inuse++;
658
659 if (first_iocb) {
660 first_iocb = 0;
661 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
662 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
663 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
664 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
665 } else {
666 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
667 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
668 *cur_dsd++ = cpu_to_le32(dsd_list_len);
669 }
670 cur_dsd = (uint32_t *)next_dsd;
671 while (avail_dsds) {
672 dma_addr_t sle_dma;
673
674 sle_dma = sg_dma_address(cur_seg);
675 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
676 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
677 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
678 cur_seg = sg_next(cur_seg);
679 avail_dsds--;
680 }
681 }
682
683 /* Null termination */
684 *cur_dsd++ = 0;
685 *cur_dsd++ = 0;
686 *cur_dsd++ = 0;
687 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
688 return 0;
689 }
690
691 /**
692 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
693 * for Command Type 6.
694 *
695 * @dsds: number of data segment descriptors needed
696 *
697 * Returns the number of dsd list needed to store @dsds.
698 */
699 inline uint16_t
700 qla24xx_calc_dsd_lists(uint16_t dsds)
701 {
702 uint16_t dsd_lists = 0;
703
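/* Round up: one DSD list per QLA_DSDS_PER_IOCB descriptors. */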
704 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
705 if (dsds % QLA_DSDS_PER_IOCB)
706 dsd_lists++;
707 return dsd_lists;
708 }
709
710
711 /**
712 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
713 * IOCB types.
714 *
715 * @sp: SRB command to process
716 * @cmd_pkt: Command type 7 IOCB
717 * @tot_dsds: Total number of segments to transfer
718 */
719 inline void
720 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
721 uint16_t tot_dsds)
722 {
723 uint16_t avail_dsds;
724 uint32_t *cur_dsd;
725 scsi_qla_host_t *vha;
726 struct scsi_cmnd *cmd;
727 struct scatterlist *sg;
728 int i;
729 struct req_que *req;
730
731 cmd = GET_CMD_SP(sp);
732
733 /* Update entry type to indicate Command Type 7 IOCB */
734 *((uint32_t *)(&cmd_pkt->entry_type)) =
735 __constant_cpu_to_le32(COMMAND_TYPE_7);
736
737 /* No data transfer */
738 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
739 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
740 return;
741 }
742
743 vha = sp->fcport->vha;
744 req = vha->req;
745
746 /* Set transfer direction */
747 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
748 cmd_pkt->task_mgmt_flags =
749 __constant_cpu_to_le16(TMF_WRITE_DATA);
750 sp->fcport->vha->hw->qla_stats.output_bytes +=
751 scsi_bufflen(cmd);
752 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
753 cmd_pkt->task_mgmt_flags =
754 __constant_cpu_to_le16(TMF_READ_DATA);
755 sp->fcport->vha->hw->qla_stats.input_bytes +=
756 scsi_bufflen(cmd);
757 }
758
759 /* One DSD is available in the Command Type 7 IOCB */
760 avail_dsds = 1;
761 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
762
763 /* Load data segments */
764
765 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
766 dma_addr_t sle_dma;
767 cont_a64_entry_t *cont_pkt;
768
769 /* Allocate additional continuation packets? */
770 if (avail_dsds == 0) {
771 /*
772 * Five DSDs are available in the Continuation
773 * Type 1 IOCB.
774 */
775 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
776 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
777 avail_dsds = 5;
778 }
779
780 sle_dma = sg_dma_address(sg);
781 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
782 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
783 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
784 avail_dsds--;
785 }
786 }
787
788 struct fw_dif_context {
789 uint32_t ref_tag;
790 uint16_t app_tag;
791 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
792 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
793 };
794
795 /*
796 * qla24xx_set_t10dif_tags() - Extract ref and app tags from the SCSI command
797 *
798 */
799 static inline void
800 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
801 unsigned int protcnt)
802 {
803 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
804 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
805
806 switch (scsi_get_prot_type(cmd)) {
807 case SCSI_PROT_DIF_TYPE0:
808 /*
809 * No check for ql2xenablehba_err_chk, as it would be an
810 * I/O error if hba tag generation is not done.
811 */
812 pkt->ref_tag = cpu_to_le32((uint32_t)
813 (0xffffffff & scsi_get_lba(cmd)));
814
815 if (!qla2x00_hba_err_chk_enabled(sp))
816 break;
817
818 pkt->ref_tag_mask[0] = 0xff;
819 pkt->ref_tag_mask[1] = 0xff;
820 pkt->ref_tag_mask[2] = 0xff;
821 pkt->ref_tag_mask[3] = 0xff;
822 break;
823
824 /*
825 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
826 * match LBA in CDB + N
827 */
828 case SCSI_PROT_DIF_TYPE2:
829 pkt->app_tag = __constant_cpu_to_le16(0);
830 pkt->app_tag_mask[0] = 0x0;
831 pkt->app_tag_mask[1] = 0x0;
832
833 pkt->ref_tag = cpu_to_le32((uint32_t)
834 (0xffffffff & scsi_get_lba(cmd)));
835
836 if (!qla2x00_hba_err_chk_enabled(sp))
837 break;
838
839 /* enable ALL bytes of the ref tag */
840 pkt->ref_tag_mask[0] = 0xff;
841 pkt->ref_tag_mask[1] = 0xff;
842 pkt->ref_tag_mask[2] = 0xff;
843 pkt->ref_tag_mask[3] = 0xff;
844 break;
845
846 /* For Type 3 protection: 16 bit GUARD only */
847 case SCSI_PROT_DIF_TYPE3:
848 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
849 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
850 0x00;
851 break;
852
853 /*
854 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
855 * 16 bit app tag.
856 */
857 case SCSI_PROT_DIF_TYPE1:
858 pkt->ref_tag = cpu_to_le32((uint32_t)
859 (0xffffffff & scsi_get_lba(cmd)));
860 pkt->app_tag = __constant_cpu_to_le16(0);
861 pkt->app_tag_mask[0] = 0x0;
862 pkt->app_tag_mask[1] = 0x0;
863
864 if (!qla2x00_hba_err_chk_enabled(sp))
865 break;
866
867 /* enable ALL bytes of the ref tag */
868 pkt->ref_tag_mask[0] = 0xff;
869 pkt->ref_tag_mask[1] = 0xff;
870 pkt->ref_tag_mask[2] = 0xff;
871 pkt->ref_tag_mask[3] = 0xff;
872 break;
873 }
874
875 ql_dbg(ql_dbg_io, vha, 0x3009,
876 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
877 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
878 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
879 scsi_get_prot_type(cmd), cmd);
880 }
881
882 struct qla2_sgx {
883 dma_addr_t dma_addr; /* OUT */
884 uint32_t dma_len; /* OUT */
885
886 uint32_t tot_bytes; /* IN */
887 struct scatterlist *cur_sg; /* IN */
888
889 /* for bookkeeping; bzero on initial invocation */
890 uint32_t bytes_consumed;
891 uint32_t num_bytes;
892 uint32_t tot_partial;
893
894 /* for debugging */
895 uint32_t num_sg;
896 srb_t *sp;
897 };
898
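/*
 * Hand back the data scatterlist one protection interval (blk_sz
 * bytes) at a time.  Each call yields the next DMA address/length
 * chunk in sgx->dma_addr/dma_len; *partial is set when the chunk
 * ends short of a full interval, i.e. the interval straddles
 * scatterlist elements.  Returns 0 once tot_bytes have been consumed.
 */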
899 static int
900 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
901 uint32_t *partial)
902 {
903 struct scatterlist *sg;
904 uint32_t cumulative_partial, sg_len;
905 dma_addr_t sg_dma_addr;
906
907 if (sgx->num_bytes == sgx->tot_bytes)
908 return 0;
909
910 sg = sgx->cur_sg;
911 cumulative_partial = sgx->tot_partial;
912
913 sg_dma_addr = sg_dma_address(sg);
914 sg_len = sg_dma_len(sg);
915
916 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
917
918 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
919 sgx->dma_len = (blk_sz - cumulative_partial);
920 sgx->tot_partial = 0;
921 sgx->num_bytes += blk_sz;
922 *partial = 0;
923 } else {
924 sgx->dma_len = sg_len - sgx->bytes_consumed;
925 sgx->tot_partial += sgx->dma_len;
926 *partial = 1;
927 }
928
929 sgx->bytes_consumed += sgx->dma_len;
930
931 if (sg_len == sgx->bytes_consumed) {
932 sg = sg_next(sg);
933 sgx->num_sg++;
934 sgx->cur_sg = sg;
935 sgx->bytes_consumed = 0;
936 }
937
938 return 1;
939 }
940
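/*
 * Build the data DSD list for the non-bundled DIF case: data is
 * emitted one protection interval at a time and, after each full
 * interval, an 8-byte DIF tuple taken from the protection
 * scatterlist is appended, so data and protection interleave in the
 * list handed to the firmware.
 */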
941 static int
942 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
943 uint32_t *dsd, uint16_t tot_dsds)
944 {
945 void *next_dsd;
946 uint8_t avail_dsds = 0;
947 uint32_t dsd_list_len;
948 struct dsd_dma *dsd_ptr;
949 struct scatterlist *sg_prot;
950 uint32_t *cur_dsd = dsd;
951 uint16_t used_dsds = tot_dsds;
952
953 uint32_t prot_int;
954 uint32_t partial;
955 struct qla2_sgx sgx;
956 dma_addr_t sle_dma;
957 uint32_t sle_dma_len, tot_prot_dma_len = 0;
958 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
959
960 prot_int = cmd->device->sector_size;
961
962 memset(&sgx, 0, sizeof(struct qla2_sgx));
963 sgx.tot_bytes = scsi_bufflen(cmd);
964 sgx.cur_sg = scsi_sglist(cmd);
965 sgx.sp = sp;
966
967 sg_prot = scsi_prot_sglist(cmd);
968
969 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
970
971 sle_dma = sgx.dma_addr;
972 sle_dma_len = sgx.dma_len;
973 alloc_and_fill:
974 /* Allocate additional continuation packets? */
975 if (avail_dsds == 0) {
976 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
977 QLA_DSDS_PER_IOCB : used_dsds;
978 dsd_list_len = (avail_dsds + 1) * 12;
979 used_dsds -= avail_dsds;
980
981 /* allocate tracking DS */
982 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
983 if (!dsd_ptr)
984 return 1;
985
986 /* allocate new list */
987 dsd_ptr->dsd_addr = next_dsd =
988 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
989 &dsd_ptr->dsd_list_dma);
990
991 if (!next_dsd) {
992 /*
993 * Need to cleanup only this dsd_ptr, rest
994 * will be done by sp_free_dma()
995 */
996 kfree(dsd_ptr);
997 return 1;
998 }
999
1000 list_add_tail(&dsd_ptr->list,
1001 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1002
1003 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1004
1005 /* add new list to cmd iocb or last list */
1006 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1007 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1008 *cur_dsd++ = dsd_list_len;
1009 cur_dsd = (uint32_t *)next_dsd;
1010 }
1011 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1012 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1013 *cur_dsd++ = cpu_to_le32(sle_dma_len);
1014 avail_dsds--;
1015
1016 if (partial == 0) {
1017 /* Got a full protection interval */
1018 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1019 sle_dma_len = 8;
1020
1021 tot_prot_dma_len += sle_dma_len;
1022 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1023 tot_prot_dma_len = 0;
1024 sg_prot = sg_next(sg_prot);
1025 }
1026
1027 partial = 1; /* So as to not re-enter this block */
1028 goto alloc_and_fill;
1029 }
1030 }
1031 /* Null termination */
1032 *cur_dsd++ = 0;
1033 *cur_dsd++ = 0;
1034 *cur_dsd++ = 0;
1035 return 0;
1036 }
1037
1038 static int
1039 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1040 uint16_t tot_dsds)
1041 {
1042 void *next_dsd;
1043 uint8_t avail_dsds = 0;
1044 uint32_t dsd_list_len;
1045 struct dsd_dma *dsd_ptr;
1046 struct scatterlist *sg;
1047 uint32_t *cur_dsd = dsd;
1048 int i;
1049 uint16_t used_dsds = tot_dsds;
1050 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1051 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1052
1053 uint8_t *cp;
1054
1055 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1056 dma_addr_t sle_dma;
1057
1058 /* Allocate additional continuation packets? */
1059 if (avail_dsds == 0) {
1060 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1061 QLA_DSDS_PER_IOCB : used_dsds;
1062 dsd_list_len = (avail_dsds + 1) * 12;
1063 used_dsds -= avail_dsds;
1064
1065 /* allocate tracking DS */
1066 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1067 if (!dsd_ptr)
1068 return 1;
1069
1070 /* allocate new list */
1071 dsd_ptr->dsd_addr = next_dsd =
1072 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1073 &dsd_ptr->dsd_list_dma);
1074
1075 if (!next_dsd) {
1076 /*
1077 * Need to cleanup only this dsd_ptr, rest
1078 * will be done by sp_free_dma()
1079 */
1080 kfree(dsd_ptr);
1081 return 1;
1082 }
1083
1084 list_add_tail(&dsd_ptr->list,
1085 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1086
1087 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1088
1089 /* add new list to cmd iocb or last list */
1090 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1091 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1092 *cur_dsd++ = dsd_list_len;
1093 cur_dsd = (uint32_t *)next_dsd;
1094 }
1095 sle_dma = sg_dma_address(sg);
1096 ql_dbg(ql_dbg_io, vha, 0x300a,
1097 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1098 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
1099 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1100 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1101 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1102 avail_dsds--;
1103
1104 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1105 cp = page_address(sg_page(sg)) + sg->offset;
1106 ql_dbg(ql_dbg_io, vha, 0x300b,
1107 "User data buffer=%p for cmd=%p.\n", cp, cmd);
1108 }
1109 }
1110 /* Null termination */
1111 *cur_dsd++ = 0;
1112 *cur_dsd++ = 0;
1113 *cur_dsd++ = 0;
1114 return 0;
1115 }
1116
1117 static int
1118 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1119 uint32_t *dsd,
1120 uint16_t tot_dsds)
1121 {
1122 void *next_dsd;
1123 uint8_t avail_dsds = 0;
1124 uint32_t dsd_list_len;
1125 struct dsd_dma *dsd_ptr;
1126 struct scatterlist *sg;
1127 int i;
1128 struct scsi_cmnd *cmd;
1129 uint32_t *cur_dsd = dsd;
1130 uint16_t used_dsds = tot_dsds;
1131 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1132 uint8_t *cp;
1133
1134 cmd = GET_CMD_SP(sp);
1135 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1136 dma_addr_t sle_dma;
1137
1138 /* Allocate additional continuation packets? */
1139 if (avail_dsds == 0) {
1140 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1141 QLA_DSDS_PER_IOCB : used_dsds;
1142 dsd_list_len = (avail_dsds + 1) * 12;
1143 used_dsds -= avail_dsds;
1144
1145 /* allocate tracking DS */
1146 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1147 if (!dsd_ptr)
1148 return 1;
1149
1150 /* allocate new list */
1151 dsd_ptr->dsd_addr = next_dsd =
1152 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1153 &dsd_ptr->dsd_list_dma);
1154
1155 if (!next_dsd) {
1156 /*
1157 * Need to cleanup only this dsd_ptr, rest
1158 * will be done by sp_free_dma()
1159 */
1160 kfree(dsd_ptr);
1161 return 1;
1162 }
1163
1164 list_add_tail(&dsd_ptr->list,
1165 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1166
1167 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1168
1169 /* add new list to cmd iocb or last list */
1170 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1171 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1172 *cur_dsd++ = dsd_list_len;
1173 cur_dsd = (uint32_t *)next_dsd;
1174 }
1175 sle_dma = sg_dma_address(sg);
1176 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1177 ql_dbg(ql_dbg_io, vha, 0x3027,
1178 "%s(): %p, sg_entry %d - "
1179 "addr=0x%x0x%x, len=%d.\n",
1180 __func__, cur_dsd, i,
1181 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
1182 }
1183 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1184 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1185 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1186
1187 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1188 cp = page_address(sg_page(sg)) + sg->offset;
1189 ql_dbg(ql_dbg_io, vha, 0x3028,
1190 "%s(): Protection Data buffer = %p.\n", __func__,
1191 cp);
1192 }
1193 avail_dsds--;
1194 }
1195 /* Null termination */
1196 *cur_dsd++ = 0;
1197 *cur_dsd++ = 0;
1198 *cur_dsd++ = 0;
1199 return 0;
1200 }
1201
1202 /**
1203 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1204 * Type 6 IOCB types.
1205 *
1206 * @sp: SRB command to process
1207 * @cmd_pkt: Command type CRC_2 IOCB
1208 * @tot_dsds: Total number of segments to transfer
1209 */
1210 static inline int
1211 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1212 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1213 {
1214 uint32_t *cur_dsd, *fcp_dl;
1215 scsi_qla_host_t *vha;
1216 struct scsi_cmnd *cmd;
1217 struct scatterlist *cur_seg;
1218 int sgc;
1219 uint32_t total_bytes = 0;
1220 uint32_t data_bytes;
1221 uint32_t dif_bytes;
1222 uint8_t bundling = 1;
1223 uint16_t blk_size;
1224 uint8_t *clr_ptr;
1225 struct crc_context *crc_ctx_pkt = NULL;
1226 struct qla_hw_data *ha;
1227 uint8_t additional_fcpcdb_len;
1228 uint16_t fcp_cmnd_len;
1229 struct fcp_cmnd *fcp_cmnd;
1230 dma_addr_t crc_ctx_dma;
1231 char tag[2];
1232
1233 cmd = GET_CMD_SP(sp);
1234
1235 sgc = 0;
1236 /* Update entry type to indicate Command Type CRC_2 IOCB */
1237 *((uint32_t *)(&cmd_pkt->entry_type)) =
1238 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1239
1240 vha = sp->fcport->vha;
1241 ha = vha->hw;
1242
1243 /* No data transfer */
1244 data_bytes = scsi_bufflen(cmd);
1245 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1246 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1247 return QLA_SUCCESS;
1248 }
1249
1250 cmd_pkt->vp_index = sp->fcport->vp_idx;
1251
1252 /* Set transfer direction */
1253 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1254 cmd_pkt->control_flags =
1255 __constant_cpu_to_le16(CF_WRITE_DATA);
1256 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1257 cmd_pkt->control_flags =
1258 __constant_cpu_to_le16(CF_READ_DATA);
1259 }
1260
1261 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1262 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1263 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1264 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1265 bundling = 0;
1266
1267 /* Allocate CRC context from global pool */
1268 crc_ctx_pkt = sp->u.scmd.ctx =
1269 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1270
1271 if (!crc_ctx_pkt)
1272 goto crc_queuing_error;
1273
1274 /* Zero out CTX area. */
1275 clr_ptr = (uint8_t *)crc_ctx_pkt;
1276 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1277
1278 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1279
1280 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1281
1282 /* Set handle */
1283 crc_ctx_pkt->handle = cmd_pkt->handle;
1284
1285 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1286
1287 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1288 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1289
1290 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1291 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1292 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1293
1294 /* Determine SCSI command length -- align to 4 byte boundary */
1295 if (cmd->cmd_len > 16) {
1296 additional_fcpcdb_len = cmd->cmd_len - 16;
1297 if ((cmd->cmd_len % 4) != 0) {
1298 /* SCSI cmd > 16 bytes must be multiple of 4 */
1299 goto crc_queuing_error;
1300 }
1301 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1302 } else {
1303 additional_fcpcdb_len = 0;
1304 fcp_cmnd_len = 12 + 16 + 4;
1305 }
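/*
 * fcp_cmnd_len follows the FCP_CMND IU layout: an 8-byte LUN plus 4
 * bytes of task attribute/management and control fields ahead of the
 * CDB, the CDB itself (16 bytes plus any additional CDB length,
 * which must be a multiple of 4), and the 4-byte FCP_DL written via
 * *fcp_dl further down.
 */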
1306
1307 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1308
1309 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1310 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1311 fcp_cmnd->additional_cdb_len |= 1;
1312 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1313 fcp_cmnd->additional_cdb_len |= 2;
1314
1315 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1316 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1317 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1318 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1319 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1320 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1321 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1322 fcp_cmnd->task_management = 0;
1323
1324 /*
1325 * Update tagged queuing modifier if using command tag queuing
1326 */
1327 if (scsi_populate_tag_msg(cmd, tag)) {
1328 switch (tag[0]) {
1329 case HEAD_OF_QUEUE_TAG:
1330 fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1331 break;
1332 case ORDERED_QUEUE_TAG:
1333 fcp_cmnd->task_attribute = TSK_ORDERED;
1334 break;
1335 default:
1336 fcp_cmnd->task_attribute = TSK_SIMPLE;
1337 break;
1338 }
1339 } else {
1340 fcp_cmnd->task_attribute = TSK_SIMPLE;
1341 }
1342
1343 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1344
1345 /* Compute DIF length and adjust data length to include protection */
1346 dif_bytes = 0;
1347 blk_size = cmd->device->sector_size;
1348 dif_bytes = (data_bytes / blk_size) * 8;
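/*
 * Eight bytes of DIF are carried per logical block; e.g. a 64 kB
 * request on 512-byte sectors adds 128 * 8 = 1024 protection bytes.
 */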
1349
1350 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1351 case SCSI_PROT_READ_INSERT:
1352 case SCSI_PROT_WRITE_STRIP:
1353 total_bytes = data_bytes;
1354 data_bytes += dif_bytes;
1355 break;
1356
1357 case SCSI_PROT_READ_STRIP:
1358 case SCSI_PROT_WRITE_INSERT:
1359 case SCSI_PROT_READ_PASS:
1360 case SCSI_PROT_WRITE_PASS:
1361 total_bytes = data_bytes + dif_bytes;
1362 break;
1363 default:
1364 BUG();
1365 }
1366
1367 if (!qla2x00_hba_err_chk_enabled(sp))
1368 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1369
1370 if (!bundling) {
1371 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1372 } else {
1373 /*
1374 * Configure bundling when protection data has to be fetched
1375 * with interleaving PCI accesses.
1376 */
1377 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1378 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1379 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1380 tot_prot_dsds);
1381 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1382 }
1383
1384 /* Finish the common fields of CRC pkt */
1385 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1386 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1387 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1388 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1389 /* Fibre channel byte count */
1390 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1391 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1392 additional_fcpcdb_len);
1393 *fcp_dl = htonl(total_bytes);
1394
1395 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1396 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1397 return QLA_SUCCESS;
1398 }
1399 /* Walks data segments */
1400
1401 cmd_pkt->control_flags |=
1402 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1403
1404 if (!bundling && tot_prot_dsds) {
1405 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1406 cur_dsd, tot_dsds))
1407 goto crc_queuing_error;
1408 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1409 (tot_dsds - tot_prot_dsds)))
1410 goto crc_queuing_error;
1411
1412 if (bundling && tot_prot_dsds) {
1413 /* Walks dif segments */
1414 cur_seg = scsi_prot_sglist(cmd);
1415 cmd_pkt->control_flags |=
1416 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1417 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1418 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1419 tot_prot_dsds))
1420 goto crc_queuing_error;
1421 }
1422 return QLA_SUCCESS;
1423
1424 crc_queuing_error:
1425 /* Cleanup will be performed by the caller */
1426
1427 return QLA_FUNCTION_FAILED;
1428 }
1429
1430 /**
1431 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1432 * @sp: command to send to the ISP
1433 *
1434 * Returns non-zero if a failure occurred, else zero.
1435 */
1436 int
1437 qla24xx_start_scsi(srb_t *sp)
1438 {
1439 int ret, nseg;
1440 unsigned long flags;
1441 uint32_t *clr_ptr;
1442 uint32_t index;
1443 uint32_t handle;
1444 struct cmd_type_7 *cmd_pkt;
1445 uint16_t cnt;
1446 uint16_t req_cnt;
1447 uint16_t tot_dsds;
1448 struct req_que *req = NULL;
1449 struct rsp_que *rsp = NULL;
1450 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1451 struct scsi_qla_host *vha = sp->fcport->vha;
1452 struct qla_hw_data *ha = vha->hw;
1453 char tag[2];
1454
1455 /* Setup device pointers. */
1456 ret = 0;
1457
1458 qla25xx_set_que(sp, &rsp);
1459 req = vha->req;
1460
1461 /* So we know we haven't pci_map'ed anything yet */
1462 tot_dsds = 0;
1463
1464 /* Send marker if required */
1465 if (vha->marker_needed != 0) {
1466 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1467 QLA_SUCCESS)
1468 return QLA_FUNCTION_FAILED;
1469 vha->marker_needed = 0;
1470 }
1471
1472 /* Acquire ring specific lock */
1473 spin_lock_irqsave(&ha->hardware_lock, flags);
1474
1475 /* Check for room in outstanding command list. */
1476 handle = req->current_outstanding_cmd;
1477 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1478 handle++;
1479 if (handle == MAX_OUTSTANDING_COMMANDS)
1480 handle = 1;
1481 if (!req->outstanding_cmds[handle])
1482 break;
1483 }
1484 if (index == MAX_OUTSTANDING_COMMANDS) {
1485 goto queuing_error;
1486 }
1487
1488 /* Map the sg table so we have an accurate count of sg entries needed */
1489 if (scsi_sg_count(cmd)) {
1490 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1491 scsi_sg_count(cmd), cmd->sc_data_direction);
1492 if (unlikely(!nseg))
1493 goto queuing_error;
1494 } else
1495 nseg = 0;
1496
1497 tot_dsds = nseg;
1498 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1499 if (req->cnt < (req_cnt + 2)) {
1500 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1501
1502 if (req->ring_index < cnt)
1503 req->cnt = cnt - req->ring_index;
1504 else
1505 req->cnt = req->length -
1506 (req->ring_index - cnt);
1507 }
1508 if (req->cnt < (req_cnt + 2))
1509 goto queuing_error;
1510
1511 /* Build command packet. */
1512 req->current_outstanding_cmd = handle;
1513 req->outstanding_cmds[handle] = sp;
1514 sp->handle = handle;
1515 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1516 req->cnt -= req_cnt;
1517
1518 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1519 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1520
1521 /* Zero out remaining portion of packet. */
1522 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1523 clr_ptr = (uint32_t *)cmd_pkt + 2;
1524 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1525 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1526
1527 /* Set NPORT-ID and LUN number*/
1528 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1529 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1530 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1531 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1532 cmd_pkt->vp_index = sp->fcport->vp_idx;
1533
1534 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1535 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1536
1537 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1538 if (scsi_populate_tag_msg(cmd, tag)) {
1539 switch (tag[0]) {
1540 case HEAD_OF_QUEUE_TAG:
1541 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1542 break;
1543 case ORDERED_QUEUE_TAG:
1544 cmd_pkt->task = TSK_ORDERED;
1545 break;
1546 default:
1547 cmd_pkt->task = TSK_SIMPLE;
1548 break;
1549 }
1550 } else {
1551 cmd_pkt->task = TSK_SIMPLE;
1552 }
1553
1554 /* Load SCSI command packet. */
1555 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1556 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1557
1558 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1559
1560 /* Build IOCB segments */
1561 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1562
1563 /* Set total data segment count. */
1564 cmd_pkt->entry_count = (uint8_t)req_cnt;
1565 /* Specify response queue number where completion should happen */
1566 cmd_pkt->entry_status = (uint8_t) rsp->id;
1567 wmb();
1568 /* Adjust ring index. */
1569 req->ring_index++;
1570 if (req->ring_index == req->length) {
1571 req->ring_index = 0;
1572 req->ring_ptr = req->ring;
1573 } else
1574 req->ring_ptr++;
1575
1576 sp->flags |= SRB_DMA_VALID;
1577
1578 /* Set chip new ring index. */
1579 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1580 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1581
1582 /* Manage unprocessed RIO/ZIO commands in response queue. */
1583 if (vha->flags.process_response_queue &&
1584 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1585 qla24xx_process_response_queue(vha, rsp);
1586
1587 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1588 return QLA_SUCCESS;
1589
1590 queuing_error:
1591 if (tot_dsds)
1592 scsi_dma_unmap(cmd);
1593
1594 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1595
1596 return QLA_FUNCTION_FAILED;
1597 }
1598
1599
1600 /**
1601 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1602 * @sp: command to send to the ISP
1603 *
1604 * Returns non-zero if a failure occurred, else zero.
1605 */
1606 int
1607 qla24xx_dif_start_scsi(srb_t *sp)
1608 {
1609 int nseg;
1610 unsigned long flags;
1611 uint32_t *clr_ptr;
1612 uint32_t index;
1613 uint32_t handle;
1614 uint16_t cnt;
1615 uint16_t req_cnt = 0;
1616 uint16_t tot_dsds;
1617 uint16_t tot_prot_dsds;
1618 uint16_t fw_prot_opts = 0;
1619 struct req_que *req = NULL;
1620 struct rsp_que *rsp = NULL;
1621 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1622 struct scsi_qla_host *vha = sp->fcport->vha;
1623 struct qla_hw_data *ha = vha->hw;
1624 struct cmd_type_crc_2 *cmd_pkt;
1625 uint32_t status = 0;
1626
1627 #define QDSS_GOT_Q_SPACE BIT_0
1628
1629 /* Only process protection or >16 cdb in this routine */
1630 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1631 if (cmd->cmd_len <= 16)
1632 return qla24xx_start_scsi(sp);
1633 }
1634
1635 /* Setup device pointers. */
1636
1637 qla25xx_set_que(sp, &rsp);
1638 req = vha->req;
1639
1640 /* So we know we haven't pci_map'ed anything yet */
1641 tot_dsds = 0;
1642
1643 /* Send marker if required */
1644 if (vha->marker_needed != 0) {
1645 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1646 QLA_SUCCESS)
1647 return QLA_FUNCTION_FAILED;
1648 vha->marker_needed = 0;
1649 }
1650
1651 /* Acquire ring specific lock */
1652 spin_lock_irqsave(&ha->hardware_lock, flags);
1653
1654 /* Check for room in outstanding command list. */
1655 handle = req->current_outstanding_cmd;
1656 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1657 handle++;
1658 if (handle == MAX_OUTSTANDING_COMMANDS)
1659 handle = 1;
1660 if (!req->outstanding_cmds[handle])
1661 break;
1662 }
1663
1664 if (index == MAX_OUTSTANDING_COMMANDS)
1665 goto queuing_error;
1666
1667 /* Compute number of required data segments */
1668 /* Map the sg table so we have an accurate count of sg entries needed */
1669 if (scsi_sg_count(cmd)) {
1670 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1671 scsi_sg_count(cmd), cmd->sc_data_direction);
1672 if (unlikely(!nseg))
1673 goto queuing_error;
1674 else
1675 sp->flags |= SRB_DMA_VALID;
1676
1677 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1678 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1679 struct qla2_sgx sgx;
1680 uint32_t partial;
1681
1682 memset(&sgx, 0, sizeof(struct qla2_sgx));
1683 sgx.tot_bytes = scsi_bufflen(cmd);
1684 sgx.cur_sg = scsi_sglist(cmd);
1685 sgx.sp = sp;
1686
1687 nseg = 0;
1688 while (qla24xx_get_one_block_sg(
1689 cmd->device->sector_size, &sgx, &partial))
1690 nseg++;
1691 }
1692 } else
1693 nseg = 0;
1694
1695 /* number of required data segments */
1696 tot_dsds = nseg;
1697
1698 /* Compute number of required protection segments */
1699 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1700 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1701 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1702 if (unlikely(!nseg))
1703 goto queuing_error;
1704 else
1705 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1706
1707 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1708 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1709 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1710 }
1711 } else {
1712 nseg = 0;
1713 }
1714
1715 req_cnt = 1;
1716 /* Total Data and protection sg segment(s) */
1717 tot_prot_dsds = nseg;
1718 tot_dsds += nseg;
1719 if (req->cnt < (req_cnt + 2)) {
1720 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1721
1722 if (req->ring_index < cnt)
1723 req->cnt = cnt - req->ring_index;
1724 else
1725 req->cnt = req->length -
1726 (req->ring_index - cnt);
1727 }
1728
1729 if (req->cnt < (req_cnt + 2))
1730 goto queuing_error;
1731
1732 status |= QDSS_GOT_Q_SPACE;
1733
1734 /* Build header part of command packet (excluding the OPCODE). */
1735 req->current_outstanding_cmd = handle;
1736 req->outstanding_cmds[handle] = sp;
1737 sp->handle = handle;
1738 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1739 req->cnt -= req_cnt;
1740
1741 /* Fill-in common area */
1742 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1743 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1744
1745 clr_ptr = (uint32_t *)cmd_pkt + 2;
1746 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1747
1748 /* Set NPORT-ID and LUN number*/
1749 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1750 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1751 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1752 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1753
1754 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1755 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1756
1757 /* Total Data and protection segment(s) */
1758 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1759
1760 /* Build IOCB segments and adjust for data protection segments */
1761 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1762 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1763 QLA_SUCCESS)
1764 goto queuing_error;
1765
1766 cmd_pkt->entry_count = (uint8_t)req_cnt;
1767 /* Specify response queue number where completion should happen */
1768 cmd_pkt->entry_status = (uint8_t) rsp->id;
1769 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1770 wmb();
1771
1772 /* Adjust ring index. */
1773 req->ring_index++;
1774 if (req->ring_index == req->length) {
1775 req->ring_index = 0;
1776 req->ring_ptr = req->ring;
1777 } else
1778 req->ring_ptr++;
1779
1780 /* Set chip new ring index. */
1781 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1782 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1783
1784 /* Manage unprocessed RIO/ZIO commands in response queue. */
1785 if (vha->flags.process_response_queue &&
1786 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1787 qla24xx_process_response_queue(vha, rsp);
1788
1789 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1790
1791 return QLA_SUCCESS;
1792
1793 queuing_error:
1794 if (status & QDSS_GOT_Q_SPACE) {
1795 req->outstanding_cmds[handle] = NULL;
1796 req->cnt += req_cnt;
1797 }
1798 /* Cleanup will be performed by the caller (queuecommand) */
1799
1800 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1801 return QLA_FUNCTION_FAILED;
1802 }
1803
1804
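/*
 * Select the response queue on which the command should complete:
 * with CPU affinity enabled, map the submitting CPU to one of the
 * additional response queues, otherwise fall back to the base
 * queue 0.
 */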
1805 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1806 {
1807 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1808 struct qla_hw_data *ha = sp->fcport->vha->hw;
1809 int affinity = cmd->request->cpu;
1810
1811 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1812 affinity < ha->max_rsp_queues - 1)
1813 *rsp = ha->rsp_q_map[affinity + 1];
1814 else
1815 *rsp = ha->rsp_q_map[0];
1816 }
1817
1818 /* Generic Control-SRB manipulation functions. */
1819 void *
1820 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1821 {
1822 struct qla_hw_data *ha = vha->hw;
1823 struct req_que *req = ha->req_q_map[0];
1824 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1825 uint32_t index, handle;
1826 request_t *pkt;
1827 uint16_t cnt, req_cnt;
1828
1829 pkt = NULL;
1830 req_cnt = 1;
1831 handle = 0;
1832
1833 if (!sp)
1834 goto skip_cmd_array;
1835
1836 /* Check for room in outstanding command list. */
1837 handle = req->current_outstanding_cmd;
1838 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1839 handle++;
1840 if (handle == MAX_OUTSTANDING_COMMANDS)
1841 handle = 1;
1842 if (!req->outstanding_cmds[handle])
1843 break;
1844 }
1845 if (index == MAX_OUTSTANDING_COMMANDS) {
1846 ql_log(ql_log_warn, vha, 0x700b,
1847 "No room on oustanding cmd array.\n");
1848 goto queuing_error;
1849 }
1850
1851 /* Prep command array. */
1852 req->current_outstanding_cmd = handle;
1853 req->outstanding_cmds[handle] = sp;
1854 sp->handle = handle;
1855
1856 /* Adjust entry-counts as needed. */
1857 if (sp->type != SRB_SCSI_CMD)
1858 req_cnt = sp->iocbs;
1859
1860 skip_cmd_array:
1861 /* Check for room on request queue. */
1862 if (req->cnt < req_cnt) {
1863 if (ha->mqenable || IS_QLA83XX(ha))
1864 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1865 else if (IS_QLA82XX(ha))
1866 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1867 else if (IS_FWI2_CAPABLE(ha))
1868 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1869 else
1870 cnt = qla2x00_debounce_register(
1871 ISP_REQ_Q_OUT(ha, &reg->isp));
1872
1873 if (req->ring_index < cnt)
1874 req->cnt = cnt - req->ring_index;
1875 else
1876 req->cnt = req->length -
1877 (req->ring_index - cnt);
1878 }
1879 if (req->cnt < req_cnt)
1880 goto queuing_error;
1881
1882 /* Prep packet */
1883 req->cnt -= req_cnt;
1884 pkt = req->ring_ptr;
1885 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1886 pkt->entry_count = req_cnt;
1887 pkt->handle = handle;
1888
1889 queuing_error:
1890 return pkt;
1891 }
1892
1893 static void
1894 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1895 {
1896 struct srb_iocb *lio = &sp->u.iocb_cmd;
1897
1898 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1899 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1900 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1901 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1902 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1903 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1904 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1905 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1906 logio->port_id[1] = sp->fcport->d_id.b.area;
1907 logio->port_id[2] = sp->fcport->d_id.b.domain;
1908 logio->vp_index = sp->fcport->vp_idx;
1909 }
1910
1911 static void
1912 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1913 {
1914 struct qla_hw_data *ha = sp->fcport->vha->hw;
1915 struct srb_iocb *lio = &sp->u.iocb_cmd;
1916 uint16_t opts;
1917
1918 mbx->entry_type = MBX_IOCB_TYPE;
1919 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1920 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1921 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1922 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1923 if (HAS_EXTENDED_IDS(ha)) {
1924 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1925 mbx->mb10 = cpu_to_le16(opts);
1926 } else {
1927 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1928 }
1929 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1930 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1931 sp->fcport->d_id.b.al_pa);
1932 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1933 }
1934
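/**
 * qla24xx_logout_iocb() - Build an implicit LOGO IOCB for FWI2-capable ISPs.
 * @sp: logout SRB
 * @logio: login/logout IOCB to populate
 *
 * Builds a LOGO request with the fcport's N_Port handle, port ID and
 * VP index.
 */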
1935 static void
1936 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1937 {
1938 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1939 logio->control_flags =
1940 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1941 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1942 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1943 logio->port_id[1] = sp->fcport->d_id.b.area;
1944 logio->port_id[2] = sp->fcport->d_id.b.domain;
1945 logio->vp_index = sp->fcport->vp_idx;
1946 }
1947
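/**
 * qla2x00_logout_iocb() - Build a fabric-logout mailbox IOCB for pre-FWI2 ISPs.
 * @sp: logout SRB
 * @mbx: mailbox IOCB to populate
 *
 * Encodes an MBC_LOGOUT_FABRIC_PORT request for the SRB's fcport.
 */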
1948 static void
1949 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1950 {
1951 struct qla_hw_data *ha = sp->fcport->vha->hw;
1952
1953 mbx->entry_type = MBX_IOCB_TYPE;
1954 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1955 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1956 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1957 cpu_to_le16(sp->fcport->loop_id):
1958 cpu_to_le16(sp->fcport->loop_id << 8);
1959 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1960 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1961 sp->fcport->d_id.b.al_pa);
1962 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1963 /* Implicit: mbx->mbx10 = 0. */
1964 }
1965
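/**
 * qla24xx_adisc_iocb() - Build an ADISC IOCB for FWI2-capable ISPs.
 * @sp: ADISC SRB
 * @logio: login/logout IOCB to populate
 */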
1966 static void
1967 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1968 {
1969 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1970 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1971 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1972 logio->vp_index = sp->fcport->vp_idx;
1973 }
1974
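/**
 * qla2x00_adisc_iocb() - Build the mailbox IOCB used for ADISC on pre-FWI2 ISPs.
 * @sp: ADISC SRB
 * @mbx: mailbox IOCB to populate
 *
 * Encodes an MBC_GET_PORT_DATABASE request targeting the pre-allocated
 * async port-database buffer (ha->async_pd_dma).
 */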
1975 static void
1976 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1977 {
1978 struct qla_hw_data *ha = sp->fcport->vha->hw;
1979
1980 mbx->entry_type = MBX_IOCB_TYPE;
1981 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1982 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1983 if (HAS_EXTENDED_IDS(ha)) {
1984 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1985 mbx->mb10 = cpu_to_le16(BIT_0);
1986 } else {
1987 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1988 }
1989 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1990 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1991 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1992 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1993 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1994 }
1995
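/**
 * qla24xx_tm_iocb() - Build a task-management IOCB.
 * @sp: task-management SRB
 * @tsk: task-management IOCB to populate
 *
 * The IOCB timeout is derived from ha->r_a_tov; for a LUN reset the LUN is
 * copied into the IOCB in FCP (big-endian) byte order.
 */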
1996 static void
1997 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1998 {
1999 uint32_t flags;
2000 unsigned int lun;
2001 struct fc_port *fcport = sp->fcport;
2002 scsi_qla_host_t *vha = fcport->vha;
2003 struct qla_hw_data *ha = vha->hw;
2004 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2005 struct req_que *req = vha->req;
2006
2007 flags = iocb->u.tmf.flags;
2008 lun = iocb->u.tmf.lun;
2009
2010 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2011 tsk->entry_count = 1;
2012 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2013 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2014 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2015 tsk->control_flags = cpu_to_le32(flags);
2016 tsk->port_id[0] = fcport->d_id.b.al_pa;
2017 tsk->port_id[1] = fcport->d_id.b.area;
2018 tsk->port_id[2] = fcport->d_id.b.domain;
2019 tsk->vp_index = fcport->vp_idx;
2020
2021 if (flags == TCF_LUN_RESET) {
2022 int_to_scsilun(lun, &tsk->lun);
2023 host_to_fcp_swap((uint8_t *)&tsk->lun,
2024 sizeof(tsk->lun));
2025 }
2026 }
2027
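/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: ELS SRB
 * @els_iocb: ELS IOCB to populate
 *
 * Fills the transmit and receive data segments from the request and reply
 * payloads of the associated fc_bsg_job.
 */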
2028 static void
2029 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2030 {
2031 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2032
2033 els_iocb->entry_type = ELS_IOCB_TYPE;
2034 els_iocb->entry_count = 1;
2035 els_iocb->sys_define = 0;
2036 els_iocb->entry_status = 0;
2037 els_iocb->handle = sp->handle;
2038 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2039 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2040 els_iocb->vp_index = sp->fcport->vp_idx;
2041 els_iocb->sof_type = EST_SOFI3;
2042 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2043
2044 els_iocb->opcode =
2045 sp->type == SRB_ELS_CMD_RPT ?
2046 bsg_job->request->rqst_data.r_els.els_code :
2047 bsg_job->request->rqst_data.h_els.command_code;
2048 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2049 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2050 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2051 els_iocb->control_flags = 0;
2052 els_iocb->rx_byte_count =
2053 cpu_to_le32(bsg_job->reply_payload.payload_len);
2054 els_iocb->tx_byte_count =
2055 cpu_to_le32(bsg_job->request_payload.payload_len);
2056
2057 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2058 (bsg_job->request_payload.sg_list)));
2059 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2060 (bsg_job->request_payload.sg_list)));
2061 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2062 (bsg_job->request_payload.sg_list));
2063
2064 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2065 (bsg_job->reply_payload.sg_list)));
2066 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2067 (bsg_job->reply_payload.sg_list)));
2068 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2069 (bsg_job->reply_payload.sg_list));
2070 }
2071
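/**
 * qla2x00_ct_iocb() - Build a CT pass-through (MS) IOCB for pre-FWI2 ISPs.
 * @sp: CT SRB
 * @ct_iocb: MS IOCB to populate
 *
 * Fills the command and response data segments from the bsg_job payloads,
 * chaining Continuation Type 1 IOCBs when the reply scatter/gather list has
 * more than one entry.
 */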
2072 static void
2073 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2074 {
2075 uint16_t avail_dsds;
2076 uint32_t *cur_dsd;
2077 struct scatterlist *sg;
2078 int index;
2079 uint16_t tot_dsds;
2080 scsi_qla_host_t *vha = sp->fcport->vha;
2081 struct qla_hw_data *ha = vha->hw;
2082 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2083 int loop_iteration = 0;
2084 int cont_iocb_prsnt = 0;
2085 int entry_count = 1;
2086
2087 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2088 ct_iocb->entry_type = CT_IOCB_TYPE;
2089 ct_iocb->entry_status = 0;
2090 ct_iocb->handle1 = sp->handle;
2091 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2092 ct_iocb->status = __constant_cpu_to_le16(0);
2093 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2094 ct_iocb->timeout = 0;
2095 ct_iocb->cmd_dsd_count =
2096 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2097 ct_iocb->total_dsd_count =
2098 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2099 ct_iocb->req_bytecount =
2100 cpu_to_le32(bsg_job->request_payload.payload_len);
2101 ct_iocb->rsp_bytecount =
2102 cpu_to_le32(bsg_job->reply_payload.payload_len);
2103
2104 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2105 (bsg_job->request_payload.sg_list)));
2106 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2107 (bsg_job->request_payload.sg_list)));
2108 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2109
2110 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2111 (bsg_job->reply_payload.sg_list)));
2112 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2113 (bsg_job->reply_payload.sg_list)));
2114 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2115
2116 avail_dsds = 1;
2117 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2118 index = 0;
2119 tot_dsds = bsg_job->reply_payload.sg_cnt;
2120
2121 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2122 dma_addr_t sle_dma;
2123 cont_a64_entry_t *cont_pkt;
2124
2125 /* Allocate additional continuation packets? */
2126 if (avail_dsds == 0) {
2127 /*
2128 * Five DSDs are available in the Cont.
2129 * Type 1 IOCB.
2130 */
2131 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2132 vha->hw->req_q_map[0]);
2133 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2134 avail_dsds = 5;
2135 cont_iocb_prsnt = 1;
2136 entry_count++;
2137 }
2138
2139 sle_dma = sg_dma_address(sg);
2140 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2141 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2142 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2143 loop_iteration++;
2144 avail_dsds--;
2145 }
2146 ct_iocb->entry_count = entry_count;
2147 }
2148
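/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2-capable ISPs.
 * @sp: CT SRB
 * @ct_iocb: CT IOCB to populate
 *
 * Fills the command data segment from the request payload and the response
 * data segments from the reply payload, chaining Continuation Type 1 IOCBs
 * as needed.
 */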
2149 static void
2150 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2151 {
2152 uint16_t avail_dsds;
2153 uint32_t *cur_dsd;
2154 struct scatterlist *sg;
2155 int index;
2156 uint16_t tot_dsds;
2157 scsi_qla_host_t *vha = sp->fcport->vha;
2158 struct qla_hw_data *ha = vha->hw;
2159 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2160 int loop_iteration = 0;
2161 int cont_iocb_prsnt = 0;
2162 int entry_count = 1;
2163
2164 ct_iocb->entry_type = CT_IOCB_TYPE;
2165 ct_iocb->entry_status = 0;
2166 ct_iocb->sys_define = 0;
2167 ct_iocb->handle = sp->handle;
2168
2169 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2170 ct_iocb->vp_index = sp->fcport->vp_idx;
2171 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2172
2173 ct_iocb->cmd_dsd_count =
2174 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2175 ct_iocb->timeout = 0;
2176 ct_iocb->rsp_dsd_count =
2177 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2178 ct_iocb->rsp_byte_count =
2179 cpu_to_le32(bsg_job->reply_payload.payload_len);
2180 ct_iocb->cmd_byte_count =
2181 cpu_to_le32(bsg_job->request_payload.payload_len);
2182 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2183 (bsg_job->request_payload.sg_list)));
2184 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2185 (bsg_job->request_payload.sg_list)));
2186 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2187 (bsg_job->request_payload.sg_list));
2188
2189 avail_dsds = 1;
2190 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2191 index = 0;
2192 tot_dsds = bsg_job->reply_payload.sg_cnt;
2193
2194 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2195 dma_addr_t sle_dma;
2196 cont_a64_entry_t *cont_pkt;
2197
2198 /* Allocate additional continuation packets? */
2199 if (avail_dsds == 0) {
2200 /*
2201 * Five DSDs are available in the Cont.
2202 * Type 1 IOCB.
2203 */
2204 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2205 ha->req_q_map[0]);
2206 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2207 avail_dsds = 5;
2208 cont_iocb_prsnt = 1;
2209 entry_count++;
2210 }
2211
2212 sle_dma = sg_dma_address(sg);
2213 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2214 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2215 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2216 loop_iteration++;
2217 avail_dsds--;
2218 }
2219 ct_iocb->entry_count = entry_count;
2220 }
2221
2222 /*
2223 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2224 * @sp: command to send to the ISP
2225 *
2226 * Returns non-zero if a failure occurred, else zero.
2227 */
2228 int
2229 qla82xx_start_scsi(srb_t *sp)
2230 {
2231 int ret, nseg;
2232 unsigned long flags;
2233 struct scsi_cmnd *cmd;
2234 uint32_t *clr_ptr;
2235 uint32_t index;
2236 uint32_t handle;
2237 uint16_t cnt;
2238 uint16_t req_cnt;
2239 uint16_t tot_dsds;
2240 struct device_reg_82xx __iomem *reg;
2241 uint32_t dbval;
2242 uint32_t *fcp_dl;
2243 uint8_t additional_cdb_len;
2244 struct ct6_dsd *ctx;
2245 struct scsi_qla_host *vha = sp->fcport->vha;
2246 struct qla_hw_data *ha = vha->hw;
2247 struct req_que *req = NULL;
2248 struct rsp_que *rsp = NULL;
2249 char tag[2];
2250
2251 /* Setup device pointers. */
2252 ret = 0;
2253 reg = &ha->iobase->isp82;
2254 cmd = GET_CMD_SP(sp);
2255 req = vha->req;
2256 rsp = ha->rsp_q_map[0];
2257
2258 /* So we know we haven't pci_map'ed anything yet */
2259 tot_dsds = 0;
2260
2261 dbval = 0x04 | (ha->portnum << 5);
2262
2263 /* Send marker if required */
2264 if (vha->marker_needed != 0) {
2265 if (qla2x00_marker(vha, req,
2266 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2267 ql_log(ql_log_warn, vha, 0x300c,
2268 "qla2x00_marker failed for cmd=%p.\n", cmd);
2269 return QLA_FUNCTION_FAILED;
2270 }
2271 vha->marker_needed = 0;
2272 }
2273
2274 /* Acquire ring specific lock */
2275 spin_lock_irqsave(&ha->hardware_lock, flags);
2276
2277 /* Check for room in outstanding command list. */
2278 handle = req->current_outstanding_cmd;
2279 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2280 handle++;
2281 if (handle == MAX_OUTSTANDING_COMMANDS)
2282 handle = 1;
2283 if (!req->outstanding_cmds[handle])
2284 break;
2285 }
2286 if (index == MAX_OUTSTANDING_COMMANDS)
2287 goto queuing_error;
2288
2289 /* Map the sg table so we have an accurate count of sg entries needed */
2290 if (scsi_sg_count(cmd)) {
2291 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2292 scsi_sg_count(cmd), cmd->sc_data_direction);
2293 if (unlikely(!nseg))
2294 goto queuing_error;
2295 } else
2296 nseg = 0;
2297
2298 tot_dsds = nseg;
2299
2300 if (tot_dsds > ql2xshiftctondsd) {
2301 struct cmd_type_6 *cmd_pkt;
2302 uint16_t more_dsd_lists = 0;
2303 struct dsd_dma *dsd_ptr;
2304 uint16_t i;
2305
2306 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2307 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2308 ql_dbg(ql_dbg_io, vha, 0x300d,
2309 "Num of DSD list %d is than %d for cmd=%p.\n",
2310 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2311 cmd);
2312 goto queuing_error;
2313 }
2314
2315 if (more_dsd_lists <= ha->gbl_dsd_avail)
2316 goto sufficient_dsds;
2317 else
2318 more_dsd_lists -= ha->gbl_dsd_avail;
2319
2320 for (i = 0; i < more_dsd_lists; i++) {
2321 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2322 if (!dsd_ptr) {
2323 ql_log(ql_log_fatal, vha, 0x300e,
2324 "Failed to allocate memory for dsd_dma "
2325 "for cmd=%p.\n", cmd);
2326 goto queuing_error;
2327 }
2328
2329 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2330 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2331 if (!dsd_ptr->dsd_addr) {
2332 kfree(dsd_ptr);
2333 ql_log(ql_log_fatal, vha, 0x300f,
2334 "Failed to allocate memory for dsd_addr "
2335 "for cmd=%p.\n", cmd);
2336 goto queuing_error;
2337 }
2338 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2339 ha->gbl_dsd_avail++;
2340 }
2341
2342 sufficient_dsds:
2343 req_cnt = 1;
2344
2345 if (req->cnt < (req_cnt + 2)) {
2346 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2347 &reg->req_q_out[0]);
2348 if (req->ring_index < cnt)
2349 req->cnt = cnt - req->ring_index;
2350 else
2351 req->cnt = req->length -
2352 (req->ring_index - cnt);
2353 }
2354
2355 if (req->cnt < (req_cnt + 2))
2356 goto queuing_error;
2357
2358 ctx = sp->u.scmd.ctx =
2359 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2360 if (!ctx) {
2361 ql_log(ql_log_fatal, vha, 0x3010,
2362 "Failed to allocate ctx for cmd=%p.\n", cmd);
2363 goto queuing_error;
2364 }
2365
2366 memset(ctx, 0, sizeof(struct ct6_dsd));
2367 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2368 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2369 if (!ctx->fcp_cmnd) {
2370 ql_log(ql_log_fatal, vha, 0x3011,
2371 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2372 goto queuing_error_fcp_cmnd;
2373 }
2374
2375 /* Initialize the DSD list and dma handle */
2376 INIT_LIST_HEAD(&ctx->dsd_list);
2377 ctx->dsd_use_cnt = 0;
2378
2379 if (cmd->cmd_len > 16) {
2380 additional_cdb_len = cmd->cmd_len - 16;
2381 if ((cmd->cmd_len % 4) != 0) {
2382 /* SCSI commands longer than 16 bytes must have a
2383 * length that is a multiple of 4
2384 */
2385 ql_log(ql_log_warn, vha, 0x3012,
2386 "scsi cmd len %d not multiple of 4 "
2387 "for cmd=%p.\n", cmd->cmd_len, cmd);
2388 goto queuing_error_fcp_cmnd;
2389 }
2390 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2391 } else {
2392 additional_cdb_len = 0;
2393 ctx->fcp_cmnd_len = 12 + 16 + 4;
2394 }
2395
2396 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2397 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2398
2399 /* Zero out remaining portion of packet. */
2400 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2401 clr_ptr = (uint32_t *)cmd_pkt + 2;
2402 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2403 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2404
2405 /* Set NPORT-ID and LUN number*/
2406 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2407 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2408 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2409 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2410 cmd_pkt->vp_index = sp->fcport->vp_idx;
2411
2412 /* Build IOCB segments */
2413 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2414 goto queuing_error_fcp_cmnd;
2415
2416 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2417 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2418
2419 /* build FCP_CMND IU */
2420 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2421 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2422 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2423
2424 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2425 ctx->fcp_cmnd->additional_cdb_len |= 1;
2426 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2427 ctx->fcp_cmnd->additional_cdb_len |= 2;
2428
2429 /*
2430 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2431 */
2432 if (scsi_populate_tag_msg(cmd, tag)) {
2433 switch (tag[0]) {
2434 case HEAD_OF_QUEUE_TAG:
2435 ctx->fcp_cmnd->task_attribute =
2436 TSK_HEAD_OF_QUEUE;
2437 break;
2438 case ORDERED_QUEUE_TAG:
2439 ctx->fcp_cmnd->task_attribute =
2440 TSK_ORDERED;
2441 break;
2442 }
2443 }
2444
2445 /* Populate the FCP_PRIO. */
2446 if (ha->flags.fcp_prio_enabled)
2447 ctx->fcp_cmnd->task_attribute |=
2448 sp->fcport->fcp_prio << 3;
2449
2450 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2451
2452 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2453 additional_cdb_len);
2454 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2455
2456 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2457 cmd_pkt->fcp_cmnd_dseg_address[0] =
2458 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2459 cmd_pkt->fcp_cmnd_dseg_address[1] =
2460 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2461
2462 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2463 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2464 /* Set total data segment count. */
2465 cmd_pkt->entry_count = (uint8_t)req_cnt;
2466 /* Specify response queue number where
2467 * completion should happen
2468 */
2469 cmd_pkt->entry_status = (uint8_t) rsp->id;
2470 } else {
2471 struct cmd_type_7 *cmd_pkt;
2472 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2473 if (req->cnt < (req_cnt + 2)) {
2474 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2475 &reg->req_q_out[0]);
2476 if (req->ring_index < cnt)
2477 req->cnt = cnt - req->ring_index;
2478 else
2479 req->cnt = req->length -
2480 (req->ring_index - cnt);
2481 }
2482 if (req->cnt < (req_cnt + 2))
2483 goto queuing_error;
2484
2485 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2486 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2487
2488 /* Zero out remaining portion of packet. */
2489 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2490 clr_ptr = (uint32_t *)cmd_pkt + 2;
2491 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2492 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2493
2494 /* Set NPORT-ID and LUN number*/
2495 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2496 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2497 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2498 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2499 cmd_pkt->vp_index = sp->fcport->vp_idx;
2500
2501 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2502 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2503 sizeof(cmd_pkt->lun));
2504
2505 /*
2506 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2507 */
2508 if (scsi_populate_tag_msg(cmd, tag)) {
2509 switch (tag[0]) {
2510 case HEAD_OF_QUEUE_TAG:
2511 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2512 break;
2513 case ORDERED_QUEUE_TAG:
2514 cmd_pkt->task = TSK_ORDERED;
2515 break;
2516 }
2517 }
2518
2519 /* Populate the FCP_PRIO. */
2520 if (ha->flags.fcp_prio_enabled)
2521 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2522
2523 /* Load SCSI command packet. */
2524 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2525 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2526
2527 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2528
2529 /* Build IOCB segments */
2530 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2531
2532 /* Set total data segment count. */
2533 cmd_pkt->entry_count = (uint8_t)req_cnt;
2534 /* Specify response queue number where
2535 * completion should happen.
2536 */
2537 cmd_pkt->entry_status = (uint8_t) rsp->id;
2538
2539 }
2540 /* Build command packet. */
2541 req->current_outstanding_cmd = handle;
2542 req->outstanding_cmds[handle] = sp;
2543 sp->handle = handle;
2544 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2545 req->cnt -= req_cnt;
2546 wmb();
2547
2548 /* Adjust ring index. */
2549 req->ring_index++;
2550 if (req->ring_index == req->length) {
2551 req->ring_index = 0;
2552 req->ring_ptr = req->ring;
2553 } else
2554 req->ring_ptr++;
2555
2556 sp->flags |= SRB_DMA_VALID;
2557
2558 /* Set chip new ring index. */
2559 /* write, read and verify logic */
2560 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2561 if (ql2xdbwr)
2562 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2563 else {
2564 WRT_REG_DWORD(
2565 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2566 dbval);
2567 wmb();
2568 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2569 WRT_REG_DWORD(
2570 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2571 dbval);
2572 wmb();
2573 }
2574 }
2575
2576 /* Manage unprocessed RIO/ZIO commands in response queue. */
2577 if (vha->flags.process_response_queue &&
2578 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2579 qla24xx_process_response_queue(vha, rsp);
2580
2581 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2582 return QLA_SUCCESS;
2583
2584 queuing_error_fcp_cmnd:
2585 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2586 queuing_error:
2587 if (tot_dsds)
2588 scsi_dma_unmap(cmd);
2589
2590 if (sp->u.scmd.ctx) {
2591 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2592 sp->u.scmd.ctx = NULL;
2593 }
2594 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2595
2596 return QLA_FUNCTION_FAILED;
2597 }
2598
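/**
 * qla2x00_start_sp() - Build and issue an IOCB for a control-type SRB.
 * @sp: SRB to issue
 *
 * Allocates an IOCB under the hardware lock, dispatches to the type- and
 * ISP-specific IOCB builder, and rings the base request queue.
 *
 * Returns QLA_SUCCESS on success, QLA_FUNCTION_FAILED otherwise.
 */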
2599 int
2600 qla2x00_start_sp(srb_t *sp)
2601 {
2602 int rval;
2603 struct qla_hw_data *ha = sp->fcport->vha->hw;
2604 void *pkt;
2605 unsigned long flags;
2606
2607 rval = QLA_FUNCTION_FAILED;
2608 spin_lock_irqsave(&ha->hardware_lock, flags);
2609 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2610 if (!pkt) {
2611 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2612 "qla2x00_alloc_iocbs failed.\n");
2613 goto done;
2614 }
2615
2616 rval = QLA_SUCCESS;
2617 switch (sp->type) {
2618 case SRB_LOGIN_CMD:
2619 IS_FWI2_CAPABLE(ha) ?
2620 qla24xx_login_iocb(sp, pkt) :
2621 qla2x00_login_iocb(sp, pkt);
2622 break;
2623 case SRB_LOGOUT_CMD:
2624 IS_FWI2_CAPABLE(ha) ?
2625 qla24xx_logout_iocb(sp, pkt) :
2626 qla2x00_logout_iocb(sp, pkt);
2627 break;
2628 case SRB_ELS_CMD_RPT:
2629 case SRB_ELS_CMD_HST:
2630 qla24xx_els_iocb(sp, pkt);
2631 break;
2632 case SRB_CT_CMD:
2633 IS_FWI2_CAPABLE(ha) ?
2634 qla24xx_ct_iocb(sp, pkt) :
2635 qla2x00_ct_iocb(sp, pkt);
2636 break;
2637 case SRB_ADISC_CMD:
2638 IS_FWI2_CAPABLE(ha) ?
2639 qla24xx_adisc_iocb(sp, pkt) :
2640 qla2x00_adisc_iocb(sp, pkt);
2641 break;
2642 case SRB_TM_CMD:
2643 qla24xx_tm_iocb(sp, pkt);
2644 break;
2645 default:
2646 break;
2647 }
2648
2649 wmb();
2650 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2651 done:
2652 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2653 return rval;
2654 }
2655