// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
15 {
16 struct fc_port *fcport = container_of(work, typeof(*fcport),
17 free_work);
18
19 qla2x00_free_fcport(fcport);
20 }
21
22 /* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
24 {
25 struct bsg_job *bsg_job = sp->u.bsg_job;
26 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
27
28 ql_dbg(ql_dbg_user, sp->vha, 0x7009,
29 "%s: sp hdl %x, result=%x bsg ptr %p\n",
30 __func__, sp->handle, res, bsg_job);
31
32 /* ref: INIT */
33 kref_put(&sp->cmd_kref, qla2x00_sp_release);
34
35 bsg_reply->result = res;
36 bsg_job_done(bsg_job, bsg_reply->result,
37 bsg_reply->reply_payload_rcv_len);
38 }
39
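/*
 * Release the resources held by a BSG SRB: undo the payload DMA mappings
 * (or free the purex DMA pool buffers when the payload was remapped),
 * queue the dummy fcport for freeing on the host workqueue for CT,
 * host-based ELS and FXIOCB commands, and return the SRB to its pool.
 */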
void qla2x00_bsg_sp_free(srb_t *sp)
41 {
42 struct qla_hw_data *ha = sp->vha->hw;
43 struct bsg_job *bsg_job = sp->u.bsg_job;
44 struct fc_bsg_request *bsg_request = bsg_job->request;
45 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
46
47 if (sp->type == SRB_FXIOCB_BCMD) {
48 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
49 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
50
51 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
52 dma_unmap_sg(&ha->pdev->dev,
53 bsg_job->request_payload.sg_list,
54 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
55
56 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
57 dma_unmap_sg(&ha->pdev->dev,
58 bsg_job->reply_payload.sg_list,
59 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
60 } else {
61
62 if (sp->remap.remapped) {
63 dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
64 sp->remap.rsp.dma);
65 dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
66 sp->remap.req.dma);
67 } else {
68 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
69 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
70
71 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
72 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
73 }
74 }
75
76 if (sp->type == SRB_CT_CMD ||
77 sp->type == SRB_FXIOCB_BCMD ||
78 sp->type == SRB_ELS_CMD_HST) {
79 INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
80 queue_work(ha->wq, &sp->fcport->free_work);
81 }
82
83 qla2x00_rel_sp(sp);
84 }
85
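/*
 * Validate FCP priority configuration data read from flash: an all-ones
 * first word means no configuration is present, and the data must start
 * with the "HQOS" signature.  When @flag is 1 the entries are also
 * scanned for at least one valid priority tag.  Returns 1 if the data
 * is usable, 0 otherwise.
 */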
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
89 {
90 int i, ret, num_valid;
91 uint8_t *bcode;
92 struct qla_fcp_prio_entry *pri_entry;
93 uint32_t *bcode_val_ptr, bcode_val;
94
95 ret = 1;
96 num_valid = 0;
97 bcode = (uint8_t *)pri_cfg;
98 bcode_val_ptr = (uint32_t *)pri_cfg;
99 bcode_val = (uint32_t)(*bcode_val_ptr);
100
101 if (bcode_val == 0xFFFFFFFF) {
102 /* No FCP Priority config data in flash */
103 ql_dbg(ql_dbg_user, vha, 0x7051,
104 "No FCP Priority config data.\n");
105 return 0;
106 }
107
108 if (memcmp(bcode, "HQOS", 4)) {
109 /* Invalid FCP priority data header*/
110 ql_dbg(ql_dbg_user, vha, 0x7052,
111 "Invalid FCP Priority data header. bcode=0x%x.\n",
112 bcode_val);
113 return 0;
114 }
115 if (flag != 1)
116 return ret;
117
118 pri_entry = &pri_cfg->entry[0];
119 for (i = 0; i < pri_cfg->num_entries; i++) {
120 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
121 num_valid++;
122 pri_entry++;
123 }
124
125 if (num_valid == 0) {
126 /* No valid FCP priority data entries */
127 ql_dbg(ql_dbg_user, vha, 0x7053,
128 "No valid FCP Priority data entries.\n");
129 ret = 0;
130 } else {
131 /* FCP priority data is valid */
132 ql_dbg(ql_dbg_user, vha, 0x7054,
133 "Valid FCP priority data. num entries = %d.\n",
134 num_valid);
135 }
136
137 return ret;
138 }
139
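/*
 * Handle the FCP priority vendor command: enable or disable FCP priority
 * handling, return the current configuration to user space, or accept a
 * new configuration from the request payload, validate it and apply it
 * via qla24xx_update_all_fcp_prio().
 */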
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
142 {
143 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
144 struct fc_bsg_request *bsg_request = bsg_job->request;
145 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
146 scsi_qla_host_t *vha = shost_priv(host);
147 struct qla_hw_data *ha = vha->hw;
148 int ret = 0;
149 uint32_t len;
150 uint32_t oper;
151
152 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
153 ret = -EINVAL;
154 goto exit_fcp_prio_cfg;
155 }
156
157 /* Get the sub command */
158 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
159
160 /* Only set config is allowed if config memory is not allocated */
161 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
162 ret = -EINVAL;
163 goto exit_fcp_prio_cfg;
164 }
165 switch (oper) {
166 case QLFC_FCP_PRIO_DISABLE:
167 if (ha->flags.fcp_prio_enabled) {
168 ha->flags.fcp_prio_enabled = 0;
169 ha->fcp_prio_cfg->attributes &=
170 ~FCP_PRIO_ATTR_ENABLE;
171 qla24xx_update_all_fcp_prio(vha);
172 bsg_reply->result = DID_OK;
173 } else {
174 ret = -EINVAL;
175 bsg_reply->result = (DID_ERROR << 16);
176 goto exit_fcp_prio_cfg;
177 }
178 break;
179
180 case QLFC_FCP_PRIO_ENABLE:
181 if (!ha->flags.fcp_prio_enabled) {
182 if (ha->fcp_prio_cfg) {
183 ha->flags.fcp_prio_enabled = 1;
184 ha->fcp_prio_cfg->attributes |=
185 FCP_PRIO_ATTR_ENABLE;
186 qla24xx_update_all_fcp_prio(vha);
187 bsg_reply->result = DID_OK;
188 } else {
189 ret = -EINVAL;
190 bsg_reply->result = (DID_ERROR << 16);
191 goto exit_fcp_prio_cfg;
192 }
193 }
194 break;
195
196 case QLFC_FCP_PRIO_GET_CONFIG:
197 len = bsg_job->reply_payload.payload_len;
198 if (!len || len > FCP_PRIO_CFG_SIZE) {
199 ret = -EINVAL;
200 bsg_reply->result = (DID_ERROR << 16);
201 goto exit_fcp_prio_cfg;
202 }
203
204 bsg_reply->result = DID_OK;
205 bsg_reply->reply_payload_rcv_len =
206 sg_copy_from_buffer(
207 bsg_job->reply_payload.sg_list,
208 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
209 len);
210
211 break;
212
213 case QLFC_FCP_PRIO_SET_CONFIG:
214 len = bsg_job->request_payload.payload_len;
215 if (!len || len > FCP_PRIO_CFG_SIZE) {
216 bsg_reply->result = (DID_ERROR << 16);
217 ret = -EINVAL;
218 goto exit_fcp_prio_cfg;
219 }
220
221 if (!ha->fcp_prio_cfg) {
222 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
223 if (!ha->fcp_prio_cfg) {
224 ql_log(ql_log_warn, vha, 0x7050,
225 "Unable to allocate memory for fcp prio "
226 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
227 bsg_reply->result = (DID_ERROR << 16);
228 ret = -ENOMEM;
229 goto exit_fcp_prio_cfg;
230 }
231 }
232
233 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
234 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
235 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
236 FCP_PRIO_CFG_SIZE);
237
238 /* validate fcp priority data */
239
240 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
241 bsg_reply->result = (DID_ERROR << 16);
242 ret = -EINVAL;
/* If the buffer was invalid then
 * fcp_prio_cfg is of no use.
 */
246 vfree(ha->fcp_prio_cfg);
247 ha->fcp_prio_cfg = NULL;
248 goto exit_fcp_prio_cfg;
249 }
250
251 ha->flags.fcp_prio_enabled = 0;
252 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
253 ha->flags.fcp_prio_enabled = 1;
254 qla24xx_update_all_fcp_prio(vha);
255 bsg_reply->result = DID_OK;
256 break;
257 default:
258 ret = -EINVAL;
259 break;
260 }
261 exit_fcp_prio_cfg:
262 if (!ret)
263 bsg_job_done(bsg_job, bsg_reply->result,
264 bsg_reply->reply_payload_rcv_len);
265 return ret;
266 }
267
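/*
 * ELS pass-through.  For FC_BSG_RPT_ELS the request targets an existing
 * rport (a fabric login is attempted if needed); for host-based ELS a
 * temporary fcport is built from the destination port ID.  The payload
 * scatterlists are DMA-mapped and the command is issued as an SRB.
 */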
static int
qla2x00_process_els(struct bsg_job *bsg_job)
270 {
271 struct fc_bsg_request *bsg_request = bsg_job->request;
272 struct fc_rport *rport;
273 fc_port_t *fcport = NULL;
274 struct Scsi_Host *host;
275 scsi_qla_host_t *vha;
276 struct qla_hw_data *ha;
277 srb_t *sp;
278 const char *type;
279 int req_sg_cnt, rsp_sg_cnt;
280 int rval = (DID_ERROR << 16);
281 uint16_t nextlid = 0;
282 uint32_t els_cmd = 0;
283
284 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
285 rport = fc_bsg_to_rport(bsg_job);
286 fcport = *(fc_port_t **) rport->dd_data;
287 host = rport_to_shost(rport);
288 vha = shost_priv(host);
289 ha = vha->hw;
290 type = "FC_BSG_RPT_ELS";
291 } else {
292 host = fc_bsg_to_shost(bsg_job);
293 vha = shost_priv(host);
294 ha = vha->hw;
295 type = "FC_BSG_HST_ELS_NOLOGIN";
296 els_cmd = bsg_request->rqst_data.h_els.command_code;
297 if (els_cmd == ELS_AUTH_ELS)
298 return qla_edif_process_els(vha, bsg_job);
299 }
300
301 if (!vha->flags.online) {
302 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
303 rval = -EIO;
304 goto done;
305 }
306
307 /* pass through is supported only for ISP 4Gb or higher */
308 if (!IS_FWI2_CAPABLE(ha)) {
309 ql_dbg(ql_dbg_user, vha, 0x7001,
310 "ELS passthru not supported for ISP23xx based adapters.\n");
311 rval = -EPERM;
312 goto done;
313 }
314
315 /* Multiple SG's are not supported for ELS requests */
316 if (bsg_job->request_payload.sg_cnt > 1 ||
317 bsg_job->reply_payload.sg_cnt > 1) {
318 ql_dbg(ql_dbg_user, vha, 0x7002,
319 "Multiple SG's are not supported for ELS requests, "
320 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
321 bsg_job->request_payload.sg_cnt,
322 bsg_job->reply_payload.sg_cnt);
323 rval = -EPERM;
324 goto done;
325 }
326
327 /* ELS request for rport */
328 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
329 /* make sure the rport is logged in,
330 * if not perform fabric login
331 */
332 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
333 ql_dbg(ql_dbg_user, vha, 0x7003,
334 "Failed to login port %06X for ELS passthru.\n",
335 fcport->d_id.b24);
336 rval = -EIO;
337 goto done;
338 }
339 } else {
/* Allocate a dummy fcport structure, since the functions
 * preparing the IOCB and mailbox command retrieve port-specific
 * information from the fcport structure. For host-based ELS
 * commands there is no fcport structure allocated.
 */
345 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
346 if (!fcport) {
347 rval = -ENOMEM;
348 goto done;
349 }
350
351 /* Initialize all required fields of fcport */
352 fcport->vha = vha;
353 fcport->d_id.b.al_pa =
354 bsg_request->rqst_data.h_els.port_id[0];
355 fcport->d_id.b.area =
356 bsg_request->rqst_data.h_els.port_id[1];
357 fcport->d_id.b.domain =
358 bsg_request->rqst_data.h_els.port_id[2];
359 fcport->loop_id =
360 (fcport->d_id.b.al_pa == 0xFD) ?
361 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
362 }
363
364 req_sg_cnt =
365 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
366 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
367 if (!req_sg_cnt) {
368 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
369 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
370 rval = -ENOMEM;
371 goto done_free_fcport;
372 }
373
374 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
375 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
376 if (!rsp_sg_cnt) {
377 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
378 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
379 rval = -ENOMEM;
380 goto done_free_fcport;
381 }
382
383 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
384 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
385 ql_log(ql_log_warn, vha, 0x7008,
386 "dma mapping resulted in different sg counts, "
387 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
388 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
389 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
390 rval = -EAGAIN;
391 goto done_unmap_sg;
392 }
393
394 /* Alloc SRB structure */
395 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
396 if (!sp) {
397 rval = -ENOMEM;
398 goto done_unmap_sg;
399 }
400
401 sp->type =
402 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
403 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
404 sp->name =
405 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
406 "bsg_els_rpt" : "bsg_els_hst");
407 sp->u.bsg_job = bsg_job;
408 sp->free = qla2x00_bsg_sp_free;
409 sp->done = qla2x00_bsg_job_done;
410
411 ql_dbg(ql_dbg_user, vha, 0x700a,
412 "bsg rqst type: %s els type: %x - loop-id=%x "
413 "portid=%-2x%02x%02x.\n", type,
414 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
415 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
416
417 rval = qla2x00_start_sp(sp);
418 if (rval != QLA_SUCCESS) {
419 ql_log(ql_log_warn, vha, 0x700e,
420 "qla2x00_start_sp failed = %d\n", rval);
421 qla2x00_rel_sp(sp);
422 rval = -EIO;
423 goto done_unmap_sg;
424 }
425 return rval;
426
427 done_unmap_sg:
428 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
429 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
430 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
431 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
432 goto done_free_fcport;
433
434 done_free_fcport:
435 if (bsg_request->msgcode != FC_BSG_RPT_ELS)
436 qla2x00_free_fcport(fcport);
437 done:
438 return rval;
439 }
440
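/*
 * Number of IOCBs needed for a CT command: the command IOCB carries the
 * first two data segment descriptors and each continuation IOCB carries
 * up to five more, hence 1 + ceil((dsds - 2) / 5).
 */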
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
443 {
444 uint16_t iocbs;
445
446 iocbs = 1;
447 if (dsds > 2) {
448 iocbs += (dsds - 2) / 5;
449 if ((dsds - 2) % 5)
450 iocbs++;
451 }
452 return iocbs;
453 }
454
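/*
 * CT pass-through.  The destination (name server or management server)
 * is taken from the CT preamble, a temporary fcport is built from the
 * request's port ID, and the command is issued as an SRB_CT_CMD.
 */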
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
457 {
458 srb_t *sp;
459 struct fc_bsg_request *bsg_request = bsg_job->request;
460 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
461 scsi_qla_host_t *vha = shost_priv(host);
462 struct qla_hw_data *ha = vha->hw;
463 int rval = (DID_ERROR << 16);
464 int req_sg_cnt, rsp_sg_cnt;
465 uint16_t loop_id;
466 struct fc_port *fcport;
467 char *type = "FC_BSG_HST_CT";
468
469 req_sg_cnt =
470 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
471 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
472 if (!req_sg_cnt) {
473 ql_log(ql_log_warn, vha, 0x700f,
474 "dma_map_sg return %d for request\n", req_sg_cnt);
475 rval = -ENOMEM;
476 goto done;
477 }
478
479 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
480 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
481 if (!rsp_sg_cnt) {
482 ql_log(ql_log_warn, vha, 0x7010,
483 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
484 rval = -ENOMEM;
485 goto done;
486 }
487
488 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
489 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
490 ql_log(ql_log_warn, vha, 0x7011,
491 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
492 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
493 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
494 rval = -EAGAIN;
495 goto done_unmap_sg;
496 }
497
498 if (!vha->flags.online) {
499 ql_log(ql_log_warn, vha, 0x7012,
500 "Host is not online.\n");
501 rval = -EIO;
502 goto done_unmap_sg;
503 }
504
505 loop_id =
506 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
507 >> 24;
508 switch (loop_id) {
509 case 0xFC:
510 loop_id = NPH_SNS;
511 break;
512 case 0xFA:
513 loop_id = vha->mgmt_svr_loop_id;
514 break;
515 default:
516 ql_dbg(ql_dbg_user, vha, 0x7013,
517 "Unknown loop id: %x.\n", loop_id);
518 rval = -EINVAL;
519 goto done_unmap_sg;
520 }
521
/* Allocate a dummy fcport structure, since the functions preparing
 * the IOCB and mailbox command retrieve port-specific information
 * from the fcport structure. For host-based CT commands there is
 * no fcport structure allocated.
 */
527 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
528 if (!fcport) {
529 ql_log(ql_log_warn, vha, 0x7014,
530 "Failed to allocate fcport.\n");
531 rval = -ENOMEM;
532 goto done_unmap_sg;
533 }
534
535 /* Initialize all required fields of fcport */
536 fcport->vha = vha;
537 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
538 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
539 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
540 fcport->loop_id = loop_id;
541
542 /* Alloc SRB structure */
543 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
544 if (!sp) {
545 ql_log(ql_log_warn, vha, 0x7015,
546 "qla2x00_get_sp failed.\n");
547 rval = -ENOMEM;
548 goto done_free_fcport;
549 }
550
551 sp->type = SRB_CT_CMD;
552 sp->name = "bsg_ct";
553 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
554 sp->u.bsg_job = bsg_job;
555 sp->free = qla2x00_bsg_sp_free;
556 sp->done = qla2x00_bsg_job_done;
557
558 ql_dbg(ql_dbg_user, vha, 0x7016,
559 "bsg rqst type: %s else type: %x - "
560 "loop-id=%x portid=%02x%02x%02x.\n", type,
561 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
562 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
563 fcport->d_id.b.al_pa);
564
565 rval = qla2x00_start_sp(sp);
566 if (rval != QLA_SUCCESS) {
567 ql_log(ql_log_warn, vha, 0x7017,
568 "qla2x00_start_sp failed=%d.\n", rval);
569 qla2x00_rel_sp(sp);
570 rval = -EIO;
571 goto done_free_fcport;
572 }
573 return rval;
574
575 done_free_fcport:
576 qla2x00_free_fcport(fcport);
577 done_unmap_sg:
578 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
579 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
580 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
581 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
582 done:
583 return rval;
584 }
585
586 /* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
590 {
591 int ret = 0;
592 int rval = 0;
593 uint16_t new_config[4];
594 struct qla_hw_data *ha = vha->hw;
595
596 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
597 goto done_reset_internal;
598
599 memset(new_config, 0 , sizeof(new_config));
600 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
601 ENABLE_INTERNAL_LOOPBACK ||
602 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
603 ENABLE_EXTERNAL_LOOPBACK) {
604 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
605 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
606 (new_config[0] & INTERNAL_LOOPBACK_MASK));
607 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
608
609 ha->notify_dcbx_comp = wait;
610 ha->notify_lb_portup_comp = wait2;
611
612 ret = qla81xx_set_port_config(vha, new_config);
613 if (ret != QLA_SUCCESS) {
614 ql_log(ql_log_warn, vha, 0x7025,
615 "Set port config failed.\n");
616 ha->notify_dcbx_comp = 0;
617 ha->notify_lb_portup_comp = 0;
618 rval = -EINVAL;
619 goto done_reset_internal;
620 }
621
622 /* Wait for DCBX complete event */
623 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
624 (DCBX_COMP_TIMEOUT * HZ))) {
625 ql_dbg(ql_dbg_user, vha, 0x7026,
626 "DCBX completion not received.\n");
627 ha->notify_dcbx_comp = 0;
628 ha->notify_lb_portup_comp = 0;
629 rval = -EINVAL;
630 goto done_reset_internal;
631 } else
632 ql_dbg(ql_dbg_user, vha, 0x7027,
633 "DCBX completion received.\n");
634
635 if (wait2 &&
636 !wait_for_completion_timeout(&ha->lb_portup_comp,
637 (LB_PORTUP_COMP_TIMEOUT * HZ))) {
638 ql_dbg(ql_dbg_user, vha, 0x70c5,
639 "Port up completion not received.\n");
640 ha->notify_lb_portup_comp = 0;
641 rval = -EINVAL;
642 goto done_reset_internal;
643 } else
644 ql_dbg(ql_dbg_user, vha, 0x70c6,
645 "Port up completion received.\n");
646
647 ha->notify_dcbx_comp = 0;
648 ha->notify_lb_portup_comp = 0;
649 }
650 done_reset_internal:
651 return rval;
652 }
653
654 /*
655 * Set the port configuration to enable the internal or external loopback
656 * depending on the loopback mode.
657 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
661 {
662 int ret = 0;
663 int rval = 0;
664 unsigned long rem_tmo = 0, current_tmo = 0;
665 struct qla_hw_data *ha = vha->hw;
666
667 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
668 goto done_set_internal;
669
670 if (mode == INTERNAL_LOOPBACK)
671 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
672 else if (mode == EXTERNAL_LOOPBACK)
673 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
674 ql_dbg(ql_dbg_user, vha, 0x70be,
675 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
676
677 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
678
679 ha->notify_dcbx_comp = 1;
680 ret = qla81xx_set_port_config(vha, new_config);
681 if (ret != QLA_SUCCESS) {
682 ql_log(ql_log_warn, vha, 0x7021,
683 "set port config failed.\n");
684 ha->notify_dcbx_comp = 0;
685 rval = -EINVAL;
686 goto done_set_internal;
687 }
688
689 /* Wait for DCBX complete event */
690 current_tmo = DCBX_COMP_TIMEOUT * HZ;
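/*
 * The firmware may extend the handshake via an IDC time-extension
 * notification; if ha->idc_extend_tmo was set while the wait timed out,
 * re-arm the wait with the extended timeout.
 */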
691 while (1) {
692 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
693 current_tmo);
694 if (!ha->idc_extend_tmo || rem_tmo) {
695 ha->idc_extend_tmo = 0;
696 break;
697 }
698 current_tmo = ha->idc_extend_tmo * HZ;
699 ha->idc_extend_tmo = 0;
700 }
701
702 if (!rem_tmo) {
703 ql_dbg(ql_dbg_user, vha, 0x7022,
704 "DCBX completion not received.\n");
705 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
706 /*
707 * If the reset of the loopback mode doesn't work take a FCoE
708 * dump and reset the chip.
709 */
710 if (ret) {
711 qla2xxx_dump_fw(vha);
712 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
713 }
714 rval = -EINVAL;
715 } else {
716 if (ha->flags.idc_compl_status) {
717 ql_dbg(ql_dbg_user, vha, 0x70c3,
718 "Bad status in IDC Completion AEN\n");
719 rval = -EINVAL;
720 ha->flags.idc_compl_status = 0;
721 } else
722 ql_dbg(ql_dbg_user, vha, 0x7023,
723 "DCBX completion received.\n");
724 }
725
726 ha->notify_dcbx_comp = 0;
727 ha->idc_extend_tmo = 0;
728
729 done_set_internal:
730 return rval;
731 }
732
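/*
 * Vendor loopback/echo diagnostic.  The request payload is copied into a
 * DMA buffer and either the firmware ECHO test or a loopback test is run,
 * depending on topology and the requested options; on 81xx/83xx/8044
 * parts the port is temporarily switched into internal/external loopback
 * mode first.  The mailbox status words and the command code are appended
 * to the fc_bsg_reply on completion.
 */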
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
735 {
736 struct fc_bsg_request *bsg_request = bsg_job->request;
737 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
738 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
739 scsi_qla_host_t *vha = shost_priv(host);
740 struct qla_hw_data *ha = vha->hw;
741 int rval;
742 uint8_t command_sent;
743 char *type;
744 struct msg_echo_lb elreq;
745 uint16_t response[MAILBOX_REGISTER_COUNT];
746 uint16_t config[4], new_config[4];
747 uint8_t *fw_sts_ptr;
748 void *req_data = NULL;
749 dma_addr_t req_data_dma;
750 uint32_t req_data_len;
751 uint8_t *rsp_data = NULL;
752 dma_addr_t rsp_data_dma;
753 uint32_t rsp_data_len;
754
755 if (!vha->flags.online) {
756 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
757 return -EIO;
758 }
759
760 memset(&elreq, 0, sizeof(elreq));
761
762 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
763 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
764 DMA_TO_DEVICE);
765
766 if (!elreq.req_sg_cnt) {
767 ql_log(ql_log_warn, vha, 0x701a,
768 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
769 return -ENOMEM;
770 }
771
772 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
773 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
774 DMA_FROM_DEVICE);
775
776 if (!elreq.rsp_sg_cnt) {
777 ql_log(ql_log_warn, vha, 0x701b,
778 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
779 rval = -ENOMEM;
780 goto done_unmap_req_sg;
781 }
782
783 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
784 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
785 ql_log(ql_log_warn, vha, 0x701c,
786 "dma mapping resulted in different sg counts, "
787 "request_sg_cnt: %x dma_request_sg_cnt: %x "
788 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
789 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
790 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
791 rval = -EAGAIN;
792 goto done_unmap_sg;
793 }
794 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
795 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
796 &req_data_dma, GFP_KERNEL);
797 if (!req_data) {
798 ql_log(ql_log_warn, vha, 0x701d,
799 "dma alloc failed for req_data.\n");
800 rval = -ENOMEM;
801 goto done_unmap_sg;
802 }
803
804 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
805 &rsp_data_dma, GFP_KERNEL);
806 if (!rsp_data) {
807 ql_log(ql_log_warn, vha, 0x7004,
808 "dma alloc failed for rsp_data.\n");
809 rval = -ENOMEM;
810 goto done_free_dma_req;
811 }
812
813 /* Copy the request buffer in req_data now */
814 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
815 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
816
817 elreq.send_dma = req_data_dma;
818 elreq.rcv_dma = rsp_data_dma;
819 elreq.transfer_size = req_data_len;
820
821 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
822 elreq.iteration_count =
823 bsg_request->rqst_data.h_vendor.vendor_cmd[2];
824
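/*
 * Use the firmware ECHO test when attached in F-port topology with the
 * appropriate option bits, or for the CNA external-loopback ELS echo
 * frame case; otherwise fall through to the loopback test below.
 */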
825 if (atomic_read(&vha->loop_state) == LOOP_READY &&
826 ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
827 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
828 get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
829 req_data_len == MAX_ELS_FRAME_PAYLOAD &&
830 elreq.options == EXTERNAL_LOOPBACK))) {
831 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
832 ql_dbg(ql_dbg_user, vha, 0x701e,
833 "BSG request type: %s.\n", type);
834 command_sent = INT_DEF_LB_ECHO_CMD;
835 rval = qla2x00_echo_test(vha, &elreq, response);
836 } else {
837 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
838 memset(config, 0, sizeof(config));
839 memset(new_config, 0, sizeof(new_config));
840
841 if (qla81xx_get_port_config(vha, config)) {
842 ql_log(ql_log_warn, vha, 0x701f,
843 "Get port config failed.\n");
844 rval = -EPERM;
845 goto done_free_dma_rsp;
846 }
847
848 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
849 ql_dbg(ql_dbg_user, vha, 0x70c4,
850 "Loopback operation already in "
851 "progress.\n");
852 rval = -EAGAIN;
853 goto done_free_dma_rsp;
854 }
855
856 ql_dbg(ql_dbg_user, vha, 0x70c0,
857 "elreq.options=%04x\n", elreq.options);
858
859 if (elreq.options == EXTERNAL_LOOPBACK)
860 if (IS_QLA8031(ha) || IS_QLA8044(ha))
861 rval = qla81xx_set_loopback_mode(vha,
862 config, new_config, elreq.options);
863 else
864 rval = qla81xx_reset_loopback_mode(vha,
865 config, 1, 0);
866 else
867 rval = qla81xx_set_loopback_mode(vha, config,
868 new_config, elreq.options);
869
870 if (rval) {
871 rval = -EPERM;
872 goto done_free_dma_rsp;
873 }
874
875 type = "FC_BSG_HST_VENDOR_LOOPBACK";
876 ql_dbg(ql_dbg_user, vha, 0x7028,
877 "BSG request type: %s.\n", type);
878
879 command_sent = INT_DEF_LB_LOOPBACK_CMD;
880 rval = qla2x00_loopback_test(vha, &elreq, response);
881
882 if (response[0] == MBS_COMMAND_ERROR &&
883 response[1] == MBS_LB_RESET) {
884 ql_log(ql_log_warn, vha, 0x7029,
885 "MBX command error, Aborting ISP.\n");
886 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
887 qla2xxx_wake_dpc(vha);
888 qla2x00_wait_for_chip_reset(vha);
889 /* Also reset the MPI */
890 if (IS_QLA81XX(ha)) {
891 if (qla81xx_restart_mpi_firmware(vha) !=
892 QLA_SUCCESS) {
893 ql_log(ql_log_warn, vha, 0x702a,
894 "MPI reset failed.\n");
895 }
896 }
897
898 rval = -EIO;
899 goto done_free_dma_rsp;
900 }
901
902 if (new_config[0]) {
903 int ret;
904
905 /* Revert back to original port config
906 * Also clear internal loopback
907 */
908 ret = qla81xx_reset_loopback_mode(vha,
909 new_config, 0, 1);
910 if (ret) {
911 /*
912 * If the reset of the loopback mode
913 * doesn't work take FCoE dump and then
914 * reset the chip.
915 */
916 qla2xxx_dump_fw(vha);
917 set_bit(ISP_ABORT_NEEDED,
918 &vha->dpc_flags);
919 }
920
921 }
922
923 } else {
924 type = "FC_BSG_HST_VENDOR_LOOPBACK";
925 ql_dbg(ql_dbg_user, vha, 0x702b,
926 "BSG request type: %s.\n", type);
927 command_sent = INT_DEF_LB_LOOPBACK_CMD;
928 rval = qla2x00_loopback_test(vha, &elreq, response);
929 }
930 }
931
932 if (rval) {
933 ql_log(ql_log_warn, vha, 0x702c,
934 "Vendor request %s failed.\n", type);
935
936 rval = 0;
937 bsg_reply->result = (DID_ERROR << 16);
938 bsg_reply->reply_payload_rcv_len = 0;
939 } else {
940 ql_dbg(ql_dbg_user, vha, 0x702d,
941 "Vendor request %s completed.\n", type);
942 bsg_reply->result = (DID_OK << 16);
943 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
944 bsg_job->reply_payload.sg_cnt, rsp_data,
945 rsp_data_len);
946 }
947
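/*
 * The reply carries the mailbox response registers followed by a single
 * byte holding the command code that was issued.
 */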
948 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
949 sizeof(response) + sizeof(uint8_t);
950 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
951 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
952 sizeof(response));
953 fw_sts_ptr += sizeof(response);
954 *fw_sts_ptr = command_sent;
955
956 done_free_dma_rsp:
957 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
958 rsp_data, rsp_data_dma);
959 done_free_dma_req:
960 dma_free_coherent(&ha->pdev->dev, req_data_len,
961 req_data, req_data_dma);
962 done_unmap_sg:
963 dma_unmap_sg(&ha->pdev->dev,
964 bsg_job->reply_payload.sg_list,
965 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
966 done_unmap_req_sg:
967 dma_unmap_sg(&ha->pdev->dev,
968 bsg_job->request_payload.sg_list,
969 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
970 if (!rval)
971 bsg_job_done(bsg_job, bsg_reply->result,
972 bsg_reply->reply_payload_rcv_len);
973 return rval;
974 }
975
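/* Reset the ISP84xx; A84_ISSUE_RESET_DIAG_FW selects the diagnostic firmware. */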
static int
qla84xx_reset(struct bsg_job *bsg_job)
978 {
979 struct fc_bsg_request *bsg_request = bsg_job->request;
980 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
981 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
982 scsi_qla_host_t *vha = shost_priv(host);
983 struct qla_hw_data *ha = vha->hw;
984 int rval = 0;
985 uint32_t flag;
986
987 if (!IS_QLA84XX(ha)) {
988 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
989 return -EINVAL;
990 }
991
992 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
993
994 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
995
996 if (rval) {
997 ql_log(ql_log_warn, vha, 0x7030,
998 "Vendor request 84xx reset failed.\n");
999 rval = (DID_ERROR << 16);
1000
1001 } else {
1002 ql_dbg(ql_dbg_user, vha, 0x7031,
1003 "Vendor request 84xx reset completed.\n");
1004 bsg_reply->result = DID_OK;
1005 bsg_job_done(bsg_job, bsg_reply->result,
1006 bsg_reply->reply_payload_rcv_len);
1007 }
1008
1009 return rval;
1010 }
1011
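/*
 * Update the ISP84xx firmware: copy the image from the request payload
 * into a DMA buffer and hand it to the firmware with a VERIFY CHIP IOCB,
 * optionally flagged as diagnostic firmware.
 */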
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
1014 {
1015 struct fc_bsg_request *bsg_request = bsg_job->request;
1016 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1017 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1018 scsi_qla_host_t *vha = shost_priv(host);
1019 struct qla_hw_data *ha = vha->hw;
1020 struct verify_chip_entry_84xx *mn = NULL;
1021 dma_addr_t mn_dma, fw_dma;
1022 void *fw_buf = NULL;
1023 int rval = 0;
1024 uint32_t sg_cnt;
1025 uint32_t data_len;
1026 uint16_t options;
1027 uint32_t flag;
1028 uint32_t fw_ver;
1029
1030 if (!IS_QLA84XX(ha)) {
1031 ql_dbg(ql_dbg_user, vha, 0x7032,
1032 "Not 84xx, exiting.\n");
1033 return -EINVAL;
1034 }
1035
1036 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1037 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1038 if (!sg_cnt) {
1039 ql_log(ql_log_warn, vha, 0x7033,
1040 "dma_map_sg returned %d for request.\n", sg_cnt);
1041 return -ENOMEM;
1042 }
1043
1044 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1045 ql_log(ql_log_warn, vha, 0x7034,
1046 "DMA mapping resulted in different sg counts, "
1047 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1048 bsg_job->request_payload.sg_cnt, sg_cnt);
1049 rval = -EAGAIN;
1050 goto done_unmap_sg;
1051 }
1052
1053 data_len = bsg_job->request_payload.payload_len;
1054 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1055 &fw_dma, GFP_KERNEL);
1056 if (!fw_buf) {
1057 ql_log(ql_log_warn, vha, 0x7035,
1058 "DMA alloc failed for fw_buf.\n");
1059 rval = -ENOMEM;
1060 goto done_unmap_sg;
1061 }
1062
1063 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1064 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1065
1066 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1067 if (!mn) {
1068 ql_log(ql_log_warn, vha, 0x7036,
1069 "DMA alloc failed for fw buffer.\n");
1070 rval = -ENOMEM;
1071 goto done_free_fw_buf;
1072 }
1073
1074 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1075 fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
1076
1077 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1078 mn->entry_count = 1;
1079
1080 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1081 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1082 options |= VCO_DIAG_FW;
1083
1084 mn->options = cpu_to_le16(options);
1085 mn->fw_ver = cpu_to_le32(fw_ver);
1086 mn->fw_size = cpu_to_le32(data_len);
1087 mn->fw_seq_size = cpu_to_le32(data_len);
1088 put_unaligned_le64(fw_dma, &mn->dsd.address);
1089 mn->dsd.length = cpu_to_le32(data_len);
1090 mn->data_seg_cnt = cpu_to_le16(1);
1091
1092 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1093
1094 if (rval) {
1095 ql_log(ql_log_warn, vha, 0x7037,
1096 "Vendor request 84xx updatefw failed.\n");
1097
1098 rval = (DID_ERROR << 16);
1099 } else {
1100 ql_dbg(ql_dbg_user, vha, 0x7038,
1101 "Vendor request 84xx updatefw completed.\n");
1102
1103 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1104 bsg_reply->result = DID_OK;
1105 }
1106
1107 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1108
1109 done_free_fw_buf:
1110 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1111
1112 done_unmap_sg:
1113 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1114 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1115
1116 if (!rval)
1117 bsg_job_done(bsg_job, bsg_reply->result,
1118 bsg_reply->reply_payload_rcv_len);
1119 return rval;
1120 }
1121
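/*
 * ISP84xx management pass-through using an ACCESS CHIP IOCB.  READ_MEM
 * and GET_INFO return data through the reply payload, WRITE_MEM takes
 * data from the request payload, and CHNG_CONFIG needs no data buffer.
 */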
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1124 {
1125 struct fc_bsg_request *bsg_request = bsg_job->request;
1126 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1127 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1128 scsi_qla_host_t *vha = shost_priv(host);
1129 struct qla_hw_data *ha = vha->hw;
1130 struct access_chip_84xx *mn = NULL;
1131 dma_addr_t mn_dma, mgmt_dma;
1132 void *mgmt_b = NULL;
1133 int rval = 0;
1134 struct qla_bsg_a84_mgmt *ql84_mgmt;
1135 uint32_t sg_cnt;
1136 uint32_t data_len = 0;
1137 uint32_t dma_direction = DMA_NONE;
1138
1139 if (!IS_QLA84XX(ha)) {
1140 ql_log(ql_log_warn, vha, 0x703a,
1141 "Not 84xx, exiting.\n");
1142 return -EINVAL;
1143 }
1144
1145 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1146 if (!mn) {
1147 ql_log(ql_log_warn, vha, 0x703c,
1148 "DMA alloc failed for fw buffer.\n");
1149 return -ENOMEM;
1150 }
1151
1152 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1153 mn->entry_count = 1;
1154 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1155 switch (ql84_mgmt->mgmt.cmd) {
1156 case QLA84_MGMT_READ_MEM:
1157 case QLA84_MGMT_GET_INFO:
1158 sg_cnt = dma_map_sg(&ha->pdev->dev,
1159 bsg_job->reply_payload.sg_list,
1160 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1161 if (!sg_cnt) {
1162 ql_log(ql_log_warn, vha, 0x703d,
1163 "dma_map_sg returned %d for reply.\n", sg_cnt);
1164 rval = -ENOMEM;
1165 goto exit_mgmt;
1166 }
1167
1168 dma_direction = DMA_FROM_DEVICE;
1169
1170 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1171 ql_log(ql_log_warn, vha, 0x703e,
1172 "DMA mapping resulted in different sg counts, "
1173 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1174 bsg_job->reply_payload.sg_cnt, sg_cnt);
1175 rval = -EAGAIN;
1176 goto done_unmap_sg;
1177 }
1178
1179 data_len = bsg_job->reply_payload.payload_len;
1180
1181 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1182 &mgmt_dma, GFP_KERNEL);
1183 if (!mgmt_b) {
1184 ql_log(ql_log_warn, vha, 0x703f,
1185 "DMA alloc failed for mgmt_b.\n");
1186 rval = -ENOMEM;
1187 goto done_unmap_sg;
1188 }
1189
1190 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1191 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1192 mn->parameter1 =
1193 cpu_to_le32(
1194 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1195
1196 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1197 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1198 mn->parameter1 =
1199 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1200
1201 mn->parameter2 =
1202 cpu_to_le32(
1203 ql84_mgmt->mgmt.mgmtp.u.info.context);
1204 }
1205 break;
1206
1207 case QLA84_MGMT_WRITE_MEM:
1208 sg_cnt = dma_map_sg(&ha->pdev->dev,
1209 bsg_job->request_payload.sg_list,
1210 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1211
1212 if (!sg_cnt) {
1213 ql_log(ql_log_warn, vha, 0x7040,
1214 "dma_map_sg returned %d.\n", sg_cnt);
1215 rval = -ENOMEM;
1216 goto exit_mgmt;
1217 }
1218
1219 dma_direction = DMA_TO_DEVICE;
1220
1221 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1222 ql_log(ql_log_warn, vha, 0x7041,
1223 "DMA mapping resulted in different sg counts, "
1224 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1225 bsg_job->request_payload.sg_cnt, sg_cnt);
1226 rval = -EAGAIN;
1227 goto done_unmap_sg;
1228 }
1229
1230 data_len = bsg_job->request_payload.payload_len;
1231 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1232 &mgmt_dma, GFP_KERNEL);
1233 if (!mgmt_b) {
1234 ql_log(ql_log_warn, vha, 0x7042,
1235 "DMA alloc failed for mgmt_b.\n");
1236 rval = -ENOMEM;
1237 goto done_unmap_sg;
1238 }
1239
1240 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1241 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1242
1243 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1244 mn->parameter1 =
1245 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1246 break;
1247
1248 case QLA84_MGMT_CHNG_CONFIG:
1249 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1250 mn->parameter1 =
1251 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1252
1253 mn->parameter2 =
1254 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1255
1256 mn->parameter3 =
1257 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1258 break;
1259
1260 default:
1261 rval = -EIO;
1262 goto exit_mgmt;
1263 }
1264
1265 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1266 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1267 mn->dseg_count = cpu_to_le16(1);
1268 put_unaligned_le64(mgmt_dma, &mn->dsd.address);
1269 mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
1270 }
1271
1272 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1273
1274 if (rval) {
1275 ql_log(ql_log_warn, vha, 0x7043,
1276 "Vendor request 84xx mgmt failed.\n");
1277
1278 rval = (DID_ERROR << 16);
1279
1280 } else {
1281 ql_dbg(ql_dbg_user, vha, 0x7044,
1282 "Vendor request 84xx mgmt completed.\n");
1283
1284 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1285 bsg_reply->result = DID_OK;
1286
1287 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1288 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1289 bsg_reply->reply_payload_rcv_len =
1290 bsg_job->reply_payload.payload_len;
1291
1292 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1293 bsg_job->reply_payload.sg_cnt, mgmt_b,
1294 data_len);
1295 }
1296 }
1297
1298 done_unmap_sg:
1299 if (mgmt_b)
1300 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1301
1302 if (dma_direction == DMA_TO_DEVICE)
1303 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1304 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1305 else if (dma_direction == DMA_FROM_DEVICE)
1306 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1307 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1308
1309 exit_mgmt:
1310 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1311
1312 if (!rval)
1313 bsg_job_done(bsg_job, bsg_reply->result,
1314 bsg_reply->reply_payload_rcv_len);
1315 return rval;
1316 }
1317
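/*
 * Get or set the iiDMA (port speed) setting for a target port identified
 * by WWPN; mode != 0 sets the speed, otherwise the current setting is
 * returned in the reply after the fc_bsg_reply structure.
 */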
static int
qla24xx_iidma(struct bsg_job *bsg_job)
1320 {
1321 struct fc_bsg_request *bsg_request = bsg_job->request;
1322 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1323 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1324 scsi_qla_host_t *vha = shost_priv(host);
1325 int rval = 0;
1326 struct qla_port_param *port_param = NULL;
1327 fc_port_t *fcport = NULL;
1328 int found = 0;
1329 uint16_t mb[MAILBOX_REGISTER_COUNT];
1330 uint8_t *rsp_ptr = NULL;
1331
1332 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1333 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1334 return -EINVAL;
1335 }
1336
1337 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1338 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1339 ql_log(ql_log_warn, vha, 0x7048,
1340 "Invalid destination type.\n");
1341 return -EINVAL;
1342 }
1343
1344 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1345 if (fcport->port_type != FCT_TARGET)
1346 continue;
1347
1348 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1349 fcport->port_name, sizeof(fcport->port_name)))
1350 continue;
1351
1352 found = 1;
1353 break;
1354 }
1355
1356 if (!found) {
1357 ql_log(ql_log_warn, vha, 0x7049,
1358 "Failed to find port.\n");
1359 return -EINVAL;
1360 }
1361
1362 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1363 ql_log(ql_log_warn, vha, 0x704a,
1364 "Port is not online.\n");
1365 return -EINVAL;
1366 }
1367
1368 if (fcport->flags & FCF_LOGIN_NEEDED) {
1369 ql_log(ql_log_warn, vha, 0x704b,
1370 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1371 return -EINVAL;
1372 }
1373
1374 if (port_param->mode)
1375 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1376 port_param->speed, mb);
1377 else
1378 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1379 &port_param->speed, mb);
1380
1381 if (rval) {
1382 ql_log(ql_log_warn, vha, 0x704c,
1383 "iiDMA cmd failed for %8phN -- "
1384 "%04x %x %04x %04x.\n", fcport->port_name,
1385 rval, fcport->fp_speed, mb[0], mb[1]);
1386 rval = (DID_ERROR << 16);
1387 } else {
1388 if (!port_param->mode) {
1389 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1390 sizeof(struct qla_port_param);
1391
1392 rsp_ptr = ((uint8_t *)bsg_reply) +
1393 sizeof(struct fc_bsg_reply);
1394
1395 memcpy(rsp_ptr, port_param,
1396 sizeof(struct qla_port_param));
1397 }
1398
1399 bsg_reply->result = DID_OK;
1400 bsg_job_done(bsg_job, bsg_reply->result,
1401 bsg_reply->reply_payload_rcv_len);
1402 }
1403
1404 return rval;
1405 }
1406
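/*
 * Common setup for option ROM read/update: validate the requested start
 * offset, clamp the region size to the option ROM size and allocate the
 * staging buffer.  Called with ha->optrom_mutex held.
 */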
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
1410 {
1411 struct fc_bsg_request *bsg_request = bsg_job->request;
1412 uint32_t start = 0;
1413 int valid = 0;
1414 struct qla_hw_data *ha = vha->hw;
1415
1416 if (unlikely(pci_channel_offline(ha->pdev)))
1417 return -EINVAL;
1418
1419 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1420 if (start > ha->optrom_size) {
1421 ql_log(ql_log_warn, vha, 0x7055,
1422 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1423 return -EINVAL;
1424 }
1425
1426 if (ha->optrom_state != QLA_SWAITING) {
1427 ql_log(ql_log_info, vha, 0x7056,
1428 "optrom_state %d.\n", ha->optrom_state);
1429 return -EBUSY;
1430 }
1431
1432 ha->optrom_region_start = start;
1433 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1434 if (is_update) {
1435 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1436 valid = 1;
1437 else if (start == (ha->flt_region_boot * 4) ||
1438 start == (ha->flt_region_fw * 4))
1439 valid = 1;
1440 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1441 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1442 IS_QLA28XX(ha))
1443 valid = 1;
1444 if (!valid) {
1445 ql_log(ql_log_warn, vha, 0x7058,
1446 "Invalid start region 0x%x/0x%x.\n", start,
1447 bsg_job->request_payload.payload_len);
1448 return -EINVAL;
1449 }
1450
1451 ha->optrom_region_size = start +
1452 bsg_job->request_payload.payload_len > ha->optrom_size ?
1453 ha->optrom_size - start :
1454 bsg_job->request_payload.payload_len;
1455 ha->optrom_state = QLA_SWRITING;
1456 } else {
1457 ha->optrom_region_size = start +
1458 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1459 ha->optrom_size - start :
1460 bsg_job->reply_payload.payload_len;
1461 ha->optrom_state = QLA_SREADING;
1462 }
1463
1464 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1465 if (!ha->optrom_buffer) {
1466 ql_log(ql_log_warn, vha, 0x7059,
1467 "Read: Unable to allocate memory for optrom retrieval "
1468 "(%x)\n", ha->optrom_region_size);
1469
1470 ha->optrom_state = QLA_SWAITING;
1471 return -ENOMEM;
1472 }
1473
1474 return 0;
1475 }
1476
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
1479 {
1480 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1481 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1482 scsi_qla_host_t *vha = shost_priv(host);
1483 struct qla_hw_data *ha = vha->hw;
1484 int rval = 0;
1485
1486 if (ha->flags.nic_core_reset_hdlr_active)
1487 return -EBUSY;
1488
1489 mutex_lock(&ha->optrom_mutex);
1490 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1491 if (rval) {
1492 mutex_unlock(&ha->optrom_mutex);
1493 return rval;
1494 }
1495
1496 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1497 ha->optrom_region_start, ha->optrom_region_size);
1498
1499 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1500 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1501 ha->optrom_region_size);
1502
1503 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1504 bsg_reply->result = DID_OK;
1505 vfree(ha->optrom_buffer);
1506 ha->optrom_buffer = NULL;
1507 ha->optrom_state = QLA_SWAITING;
1508 mutex_unlock(&ha->optrom_mutex);
1509 bsg_job_done(bsg_job, bsg_reply->result,
1510 bsg_reply->reply_payload_rcv_len);
1511 return rval;
1512 }
1513
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
1516 {
1517 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1518 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1519 scsi_qla_host_t *vha = shost_priv(host);
1520 struct qla_hw_data *ha = vha->hw;
1521 int rval = 0;
1522
1523 mutex_lock(&ha->optrom_mutex);
1524 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1525 if (rval) {
1526 mutex_unlock(&ha->optrom_mutex);
1527 return rval;
1528 }
1529
1530 /* Set the isp82xx_no_md_cap not to capture minidump */
1531 ha->flags.isp82xx_no_md_cap = 1;
1532
1533 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1534 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1535 ha->optrom_region_size);
1536
1537 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1538 ha->optrom_region_start, ha->optrom_region_size);
1539
1540 if (rval) {
1541 bsg_reply->result = -EINVAL;
1542 rval = -EINVAL;
1543 } else {
1544 bsg_reply->result = DID_OK;
1545 }
1546 vfree(ha->optrom_buffer);
1547 ha->optrom_buffer = NULL;
1548 ha->optrom_state = QLA_SWAITING;
1549 mutex_unlock(&ha->optrom_mutex);
1550 bsg_job_done(bsg_job, bsg_reply->result,
1551 bsg_reply->reply_payload_rcv_len);
1552 return rval;
1553 }
1554
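/*
 * Write a list of image version descriptors to the FRU: each entry's
 * field_info is written through the SFP mailbox interface, and the
 * extended status is returned in vendor_rsp[0].
 */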
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1557 {
1558 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1559 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1560 scsi_qla_host_t *vha = shost_priv(host);
1561 struct qla_hw_data *ha = vha->hw;
1562 int rval = 0;
1563 uint8_t bsg[DMA_POOL_SIZE];
1564 struct qla_image_version_list *list = (void *)bsg;
1565 struct qla_image_version *image;
1566 uint32_t count;
1567 dma_addr_t sfp_dma;
1568 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1569
1570 if (!sfp) {
1571 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1572 EXT_STATUS_NO_MEMORY;
1573 goto done;
1574 }
1575
1576 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1577 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1578
1579 image = list->version;
1580 count = list->count;
1581 while (count--) {
1582 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1583 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1584 image->field_address.device, image->field_address.offset,
1585 sizeof(image->field_info), image->field_address.option);
1586 if (rval) {
1587 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1588 EXT_STATUS_MAILBOX;
1589 goto dealloc;
1590 }
1591 image++;
1592 }
1593
1594 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1595
1596 dealloc:
1597 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1598
1599 done:
1600 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1601 bsg_reply->result = DID_OK << 16;
1602 bsg_job_done(bsg_job, bsg_reply->result,
1603 bsg_reply->reply_payload_rcv_len);
1604
1605 return 0;
1606 }
1607
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
1610 {
1611 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1612 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1613 scsi_qla_host_t *vha = shost_priv(host);
1614 struct qla_hw_data *ha = vha->hw;
1615 int rval = 0;
1616 uint8_t bsg[DMA_POOL_SIZE];
1617 struct qla_status_reg *sr = (void *)bsg;
1618 dma_addr_t sfp_dma;
1619 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1620
1621 if (!sfp) {
1622 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1623 EXT_STATUS_NO_MEMORY;
1624 goto done;
1625 }
1626
1627 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1628 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1629
1630 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1631 sr->field_address.device, sr->field_address.offset,
1632 sizeof(sr->status_reg), sr->field_address.option);
1633 sr->status_reg = *sfp;
1634
1635 if (rval) {
1636 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1637 EXT_STATUS_MAILBOX;
1638 goto dealloc;
1639 }
1640
1641 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1642 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1643
1644 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1645
1646 dealloc:
1647 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1648
1649 done:
1650 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1651 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1652 bsg_reply->result = DID_OK << 16;
1653 bsg_job_done(bsg_job, bsg_reply->result,
1654 bsg_reply->reply_payload_rcv_len);
1655
1656 return 0;
1657 }
1658
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
1661 {
1662 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1663 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1664 scsi_qla_host_t *vha = shost_priv(host);
1665 struct qla_hw_data *ha = vha->hw;
1666 int rval = 0;
1667 uint8_t bsg[DMA_POOL_SIZE];
1668 struct qla_status_reg *sr = (void *)bsg;
1669 dma_addr_t sfp_dma;
1670 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1671
1672 if (!sfp) {
1673 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1674 EXT_STATUS_NO_MEMORY;
1675 goto done;
1676 }
1677
1678 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1679 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1680
1681 *sfp = sr->status_reg;
1682 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1683 sr->field_address.device, sr->field_address.offset,
1684 sizeof(sr->status_reg), sr->field_address.option);
1685
1686 if (rval) {
1687 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1688 EXT_STATUS_MAILBOX;
1689 goto dealloc;
1690 }
1691
1692 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1693
1694 dealloc:
1695 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1696
1697 done:
1698 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1699 bsg_reply->result = DID_OK << 16;
1700 bsg_job_done(bsg_job, bsg_reply->result,
1701 bsg_reply->reply_payload_rcv_len);
1702
1703 return 0;
1704 }
1705
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
1708 {
1709 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1710 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1711 scsi_qla_host_t *vha = shost_priv(host);
1712 struct qla_hw_data *ha = vha->hw;
1713 int rval = 0;
1714 uint8_t bsg[DMA_POOL_SIZE];
1715 struct qla_i2c_access *i2c = (void *)bsg;
1716 dma_addr_t sfp_dma;
1717 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1718
1719 if (!sfp) {
1720 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1721 EXT_STATUS_NO_MEMORY;
1722 goto done;
1723 }
1724
1725 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1726 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1727
1728 memcpy(sfp, i2c->buffer, i2c->length);
1729 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1730 i2c->device, i2c->offset, i2c->length, i2c->option);
1731
1732 if (rval) {
1733 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1734 EXT_STATUS_MAILBOX;
1735 goto dealloc;
1736 }
1737
1738 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1739
1740 dealloc:
1741 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1742
1743 done:
1744 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1745 bsg_reply->result = DID_OK << 16;
1746 bsg_job_done(bsg_job, bsg_reply->result,
1747 bsg_reply->reply_payload_rcv_len);
1748
1749 return 0;
1750 }
1751
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
1754 {
1755 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1756 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1757 scsi_qla_host_t *vha = shost_priv(host);
1758 struct qla_hw_data *ha = vha->hw;
1759 int rval = 0;
1760 uint8_t bsg[DMA_POOL_SIZE];
1761 struct qla_i2c_access *i2c = (void *)bsg;
1762 dma_addr_t sfp_dma;
1763 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1764
1765 if (!sfp) {
1766 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1767 EXT_STATUS_NO_MEMORY;
1768 goto done;
1769 }
1770
1771 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1772 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1773
1774 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1775 i2c->device, i2c->offset, i2c->length, i2c->option);
1776
1777 if (rval) {
1778 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1779 EXT_STATUS_MAILBOX;
1780 goto dealloc;
1781 }
1782
1783 memcpy(i2c->buffer, sfp, i2c->length);
1784 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1785 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1786
1787 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1788
1789 dealloc:
1790 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1791
1792 done:
1793 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1794 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1795 bsg_reply->result = DID_OK << 16;
1796 bsg_job_done(bsg_job, bsg_reply->result,
1797 bsg_reply->reply_payload_rcv_len);
1798
1799 return 0;
1800 }
1801
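/*
 * Bidirectional pass-through diagnostic.  Requires a BIDI-capable adapter
 * connected to a switch (F-port) and operating in P2P mode, and equal
 * request/reply payload lengths; a self-login is performed once and the
 * loop ID is reused for subsequent commands.  Status is reported through
 * vendor_rsp[0].
 */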
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1804 {
1805 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1806 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1807 scsi_qla_host_t *vha = shost_priv(host);
1808 struct qla_hw_data *ha = vha->hw;
1809 uint32_t rval = EXT_STATUS_OK;
1810 uint16_t req_sg_cnt = 0;
1811 uint16_t rsp_sg_cnt = 0;
1812 uint16_t nextlid = 0;
1813 uint32_t tot_dsds;
1814 srb_t *sp = NULL;
1815 uint32_t req_data_len;
1816 uint32_t rsp_data_len;
1817
1818 /* Check the type of the adapter */
1819 if (!IS_BIDI_CAPABLE(ha)) {
1820 ql_log(ql_log_warn, vha, 0x70a0,
1821 "This adapter is not supported\n");
1822 rval = EXT_STATUS_NOT_SUPPORTED;
1823 goto done;
1824 }
1825
1826 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1827 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1828 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1829 rval = EXT_STATUS_BUSY;
1830 goto done;
1831 }
1832
1833 /* Check if host is online */
1834 if (!vha->flags.online) {
1835 ql_log(ql_log_warn, vha, 0x70a1,
1836 "Host is not online\n");
1837 rval = EXT_STATUS_DEVICE_OFFLINE;
1838 goto done;
1839 }
1840
1841 /* Check if cable is plugged in or not */
1842 if (vha->device_flags & DFLG_NO_CABLE) {
1843 ql_log(ql_log_warn, vha, 0x70a2,
1844 "Cable is unplugged...\n");
1845 rval = EXT_STATUS_INVALID_CFG;
1846 goto done;
1847 }
1848
1849 /* Check if the switch is connected or not */
1850 if (ha->current_topology != ISP_CFG_F) {
1851 ql_log(ql_log_warn, vha, 0x70a3,
1852 "Host is not connected to the switch\n");
1853 rval = EXT_STATUS_INVALID_CFG;
1854 goto done;
1855 }
1856
1857 /* Check if operating mode is P2P */
1858 if (ha->operating_mode != P2P) {
1859 ql_log(ql_log_warn, vha, 0x70a4,
1860 "Host operating mode is not P2p\n");
1861 rval = EXT_STATUS_INVALID_CFG;
1862 goto done;
1863 }
1864
1865 mutex_lock(&ha->selflogin_lock);
1866 if (vha->self_login_loop_id == 0) {
1867 /* Initialize all required fields of fcport */
1868 vha->bidir_fcport.vha = vha;
1869 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1870 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1871 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1872 vha->bidir_fcport.loop_id = vha->loop_id;
1873
1874 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1875 ql_log(ql_log_warn, vha, 0x70a7,
1876 "Failed to login port %06X for bidirectional IOCB\n",
1877 vha->bidir_fcport.d_id.b24);
1878 mutex_unlock(&ha->selflogin_lock);
1879 rval = EXT_STATUS_MAILBOX;
1880 goto done;
1881 }
1882 vha->self_login_loop_id = nextlid - 1;
1883
1884 }
1885 /* Assign the self login loop id to fcport */
1886 mutex_unlock(&ha->selflogin_lock);
1887
1888 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1889
1890 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1891 bsg_job->request_payload.sg_list,
1892 bsg_job->request_payload.sg_cnt,
1893 DMA_TO_DEVICE);
1894
1895 if (!req_sg_cnt) {
1896 rval = EXT_STATUS_NO_MEMORY;
1897 goto done;
1898 }
1899
1900 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1901 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1902 DMA_FROM_DEVICE);
1903
1904 if (!rsp_sg_cnt) {
1905 rval = EXT_STATUS_NO_MEMORY;
1906 goto done_unmap_req_sg;
1907 }
1908
1909 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1910 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1911 ql_dbg(ql_dbg_user, vha, 0x70a9,
1912 "Dma mapping resulted in different sg counts "
1913 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1914 "%x dma_reply_sg_cnt: %x]\n",
1915 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1916 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1917 rval = EXT_STATUS_NO_MEMORY;
1918 goto done_unmap_sg;
1919 }
1920
1921 req_data_len = bsg_job->request_payload.payload_len;
1922 rsp_data_len = bsg_job->reply_payload.payload_len;
1923
1924 if (req_data_len != rsp_data_len) {
1925 rval = EXT_STATUS_BUSY;
1926 ql_log(ql_log_warn, vha, 0x70aa,
1927 "req_data_len != rsp_data_len\n");
1928 goto done_unmap_sg;
1929 }
1930
1931 /* Alloc SRB structure */
1932 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1933 if (!sp) {
1934 ql_dbg(ql_dbg_user, vha, 0x70ac,
1935 "Alloc SRB structure failed\n");
1936 rval = EXT_STATUS_NO_MEMORY;
1937 goto done_unmap_sg;
1938 }
1939
1940 /* Populate srb->ctx with bidir ctx */
1941 sp->u.bsg_job = bsg_job;
1942 sp->free = qla2x00_bsg_sp_free;
1943 sp->type = SRB_BIDI_CMD;
1944 sp->done = qla2x00_bsg_job_done;
1945
1946 /* Add the read and write sg count */
1947 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1948
1949 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1950 if (rval != EXT_STATUS_OK)
1951 goto done_free_srb;
1952 /* the bsg request will be completed in the interrupt handler */
1953 return rval;
1954
1955 done_free_srb:
1956 mempool_free(sp, ha->srb_mempool);
1957 done_unmap_sg:
1958 dma_unmap_sg(&ha->pdev->dev,
1959 bsg_job->reply_payload.sg_list,
1960 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1961 done_unmap_req_sg:
1962 dma_unmap_sg(&ha->pdev->dev,
1963 bsg_job->request_payload.sg_list,
1964 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1965 done:
1966
1967 /* Return an error vendor specific response
1968 * and complete the bsg request
1969 */
1970 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1971 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1972 bsg_reply->reply_payload_rcv_len = 0;
1973 bsg_reply->result = (DID_OK) << 16;
1974 bsg_job_done(bsg_job, bsg_reply->result,
1975 bsg_reply->reply_payload_rcv_len);
1976 /* Always return success, vendor rsp carries correct status */
1977 return 0;
1978 }
1979
1980 static int
1981 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1982 {
1983 struct fc_bsg_request *bsg_request = bsg_job->request;
1984 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1985 scsi_qla_host_t *vha = shost_priv(host);
1986 struct qla_hw_data *ha = vha->hw;
1987 int rval = (DID_ERROR << 16);
1988 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1989 srb_t *sp;
1990 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1991 struct fc_port *fcport;
1992 char *type = "FC_BSG_HST_FX_MGMT";
1993
1994 /* Copy the IOCB specific information */
1995 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1996 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1997
1998 /* Dump the vendor information */
1999 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
2000 piocb_rqst, sizeof(*piocb_rqst));
2001
2002 if (!vha->flags.online) {
2003 ql_log(ql_log_warn, vha, 0x70d0,
2004 "Host is not online.\n");
2005 rval = -EIO;
2006 goto done;
2007 }
2008
2009 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
2010 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2011 bsg_job->request_payload.sg_list,
2012 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2013 if (!req_sg_cnt) {
2014 ql_log(ql_log_warn, vha, 0x70c7,
2015 "dma_map_sg return %d for request\n", req_sg_cnt);
2016 rval = -ENOMEM;
2017 goto done;
2018 }
2019 }
2020
2021 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2022 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2023 bsg_job->reply_payload.sg_list,
2024 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2025 if (!rsp_sg_cnt) {
2026 ql_log(ql_log_warn, vha, 0x70c8,
2027 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
2028 rval = -ENOMEM;
2029 goto done_unmap_req_sg;
2030 }
2031 }
2032
2033 ql_dbg(ql_dbg_user, vha, 0x70c9,
2034 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2035 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2036 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2037
2038 /* Allocate a dummy fcport structure, since the functions preparing the
2039 * IOCB and mailbox command retrieve port specific information from the
2040 * fcport structure. For host based ELS commands there is no fcport
2041 * structure allocated.
2042 */
2043 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2044 if (!fcport) {
2045 ql_log(ql_log_warn, vha, 0x70ca,
2046 "Failed to allocate fcport.\n");
2047 rval = -ENOMEM;
2048 goto done_unmap_rsp_sg;
2049 }
2050
2051 /* Alloc SRB structure */
2052 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2053 if (!sp) {
2054 ql_log(ql_log_warn, vha, 0x70cb,
2055 "qla2x00_get_sp failed.\n");
2056 rval = -ENOMEM;
2057 goto done_free_fcport;
2058 }
2059
2060 /* Initialize all required fields of fcport */
2061 fcport->vha = vha;
2062 fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2063
2064 sp->type = SRB_FXIOCB_BCMD;
2065 sp->name = "bsg_fx_mgmt";
2066 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2067 sp->u.bsg_job = bsg_job;
2068 sp->free = qla2x00_bsg_sp_free;
2069 sp->done = qla2x00_bsg_job_done;
2070
2071 ql_dbg(ql_dbg_user, vha, 0x70cc,
2072 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2073 type, piocb_rqst->func_type, fcport->loop_id);
2074
2075 rval = qla2x00_start_sp(sp);
2076 if (rval != QLA_SUCCESS) {
2077 ql_log(ql_log_warn, vha, 0x70cd,
2078 "qla2x00_start_sp failed=%d.\n", rval);
2079 mempool_free(sp, ha->srb_mempool);
2080 rval = -EIO;
2081 goto done_free_fcport;
2082 }
2083 return rval;
2084
2085 done_free_fcport:
2086 qla2x00_free_fcport(fcport);
2087
2088 done_unmap_rsp_sg:
2089 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2090 dma_unmap_sg(&ha->pdev->dev,
2091 bsg_job->reply_payload.sg_list,
2092 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2093 done_unmap_req_sg:
2094 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2095 dma_unmap_sg(&ha->pdev->dev,
2096 bsg_job->request_payload.sg_list,
2097 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2098
2099 done:
2100 return rval;
2101 }
2102
2103 static int
2104 qla26xx_serdes_op(struct bsg_job *bsg_job)
2105 {
2106 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2107 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2108 scsi_qla_host_t *vha = shost_priv(host);
2109 int rval = 0;
2110 struct qla_serdes_reg sr;
2111
2112 memset(&sr, 0, sizeof(sr));
2113
2114 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2115 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2116
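/* The request payload selects either a read or a write of a single SerDes register. */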
2117 switch (sr.cmd) {
2118 case INT_SC_SERDES_WRITE_REG:
2119 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2120 bsg_reply->reply_payload_rcv_len = 0;
2121 break;
2122 case INT_SC_SERDES_READ_REG:
2123 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2124 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2125 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2126 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2127 break;
2128 default:
2129 ql_dbg(ql_dbg_user, vha, 0x708c,
2130 "Unknown serdes cmd %x.\n", sr.cmd);
2131 rval = -EINVAL;
2132 break;
2133 }
2134
2135 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2136 rval ? EXT_STATUS_MAILBOX : 0;
2137
2138 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2139 bsg_reply->result = DID_OK << 16;
2140 bsg_job_done(bsg_job, bsg_reply->result,
2141 bsg_reply->reply_payload_rcv_len);
2142 return 0;
2143 }
2144
2145 static int
2146 qla8044_serdes_op(struct bsg_job *bsg_job)
2147 {
2148 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2149 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2150 scsi_qla_host_t *vha = shost_priv(host);
2151 int rval = 0;
2152 struct qla_serdes_reg_ex sr;
2153
2154 memset(&sr, 0, sizeof(sr));
2155
2156 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2157 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2158
2159 switch (sr.cmd) {
2160 case INT_SC_SERDES_WRITE_REG:
2161 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2162 bsg_reply->reply_payload_rcv_len = 0;
2163 break;
2164 case INT_SC_SERDES_READ_REG:
2165 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2166 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2167 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2168 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2169 break;
2170 default:
2171 ql_dbg(ql_dbg_user, vha, 0x7020,
2172 "Unknown serdes cmd %x.\n", sr.cmd);
2173 rval = -EINVAL;
2174 break;
2175 }
2176
2177 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2178 rval ? EXT_STATUS_MAILBOX : 0;
2179
2180 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2181 bsg_reply->result = DID_OK << 16;
2182 bsg_job_done(bsg_job, bsg_reply->result,
2183 bsg_reply->reply_payload_rcv_len);
2184 return 0;
2185 }
2186
2187 static int
2188 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2189 {
2190 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2191 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2192 scsi_qla_host_t *vha = shost_priv(host);
2193 struct qla_hw_data *ha = vha->hw;
2194 struct qla_flash_update_caps cap;
2195
2196 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2197 return -EPERM;
2198
2199 memset(&cap, 0, sizeof(cap));
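/*
 * Fold the four 16-bit firmware attribute words into a single 64-bit
 * capabilities value: fw_attributes_ext[1]:fw_attributes_ext[0]:
 * fw_attributes_h:fw_attributes.
 */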
2200 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2201 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2202 (uint64_t)ha->fw_attributes_h << 16 |
2203 (uint64_t)ha->fw_attributes;
2204
2205 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2206 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2207 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2208
2209 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2210 EXT_STATUS_OK;
2211
2212 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2213 bsg_reply->result = DID_OK << 16;
2214 bsg_job_done(bsg_job, bsg_reply->result,
2215 bsg_reply->reply_payload_rcv_len);
2216 return 0;
2217 }
2218
2219 static int
2220 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2221 {
2222 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2223 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2224 scsi_qla_host_t *vha = shost_priv(host);
2225 struct qla_hw_data *ha = vha->hw;
2226 uint64_t online_fw_attr = 0;
2227 struct qla_flash_update_caps cap;
2228
2229 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2230 return -EPERM;
2231
2232 memset(&cap, 0, sizeof(cap));
2233 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2234 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2235
2236 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2237 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2238 (uint64_t)ha->fw_attributes_h << 16 |
2239 (uint64_t)ha->fw_attributes;
2240
2241 if (online_fw_attr != cap.capabilities) {
2242 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2243 EXT_STATUS_INVALID_PARAM;
2244 return -EINVAL;
2245 }
2246
2247 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2248 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2249 EXT_STATUS_INVALID_PARAM;
2250 return -EINVAL;
2251 }
2252
2253 bsg_reply->reply_payload_rcv_len = 0;
2254
2255 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2256 EXT_STATUS_OK;
2257
2258 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2259 bsg_reply->result = DID_OK << 16;
2260 bsg_job_done(bsg_job, bsg_reply->result,
2261 bsg_reply->reply_payload_rcv_len);
2262 return 0;
2263 }
2264
2265 static int
2266 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2267 {
2268 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2269 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2270 scsi_qla_host_t *vha = shost_priv(host);
2271 struct qla_hw_data *ha = vha->hw;
2272 struct qla_bbcr_data bbcr;
2273 uint16_t loop_id, topo, sw_cap;
2274 uint8_t domain, area, al_pa, state;
2275 int rval;
2276
2277 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2278 return -EPERM;
2279
2280 memset(&bbcr, 0, sizeof(bbcr));
2281
2282 if (vha->flags.bbcr_enable)
2283 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2284 else
2285 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2286
2287 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2288 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2289 &area, &domain, &topo, &sw_cap);
2290 if (rval != QLA_SUCCESS) {
2291 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2292 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2293 bbcr.mbx1 = loop_id;
2294 goto done;
2295 }
2296
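/*
 * Decode the BB credit recovery word: bit 12 is the offline state flag,
 * bits 11:8 the negotiated BB-SCN and bits 3:0 the configured BB-SCN.
 */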
2297 state = (vha->bbcr >> 12) & 0x1;
2298
2299 if (state) {
2300 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2301 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2302 } else {
2303 bbcr.state = QLA_BBCR_STATE_ONLINE;
2304 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2305 }
2306
2307 bbcr.configured_bbscn = vha->bbcr & 0xf;
2308 }
2309
2310 done:
2311 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2312 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2313 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2314
2315 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2316
2317 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2318 bsg_reply->result = DID_OK << 16;
2319 bsg_job_done(bsg_job, bsg_reply->result,
2320 bsg_reply->reply_payload_rcv_len);
2321 return 0;
2322 }
2323
2324 static int
2325 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2326 {
2327 struct fc_bsg_request *bsg_request = bsg_job->request;
2328 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2329 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2330 scsi_qla_host_t *vha = shost_priv(host);
2331 struct qla_hw_data *ha = vha->hw;
2332 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2333 struct link_statistics *stats = NULL;
2334 dma_addr_t stats_dma;
2335 int rval;
2336 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
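/* The _EX variant carries option flags in vendor_cmd[1]; the legacy command passes 0. */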
2337 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2338
2339 if (test_bit(UNLOADING, &vha->dpc_flags))
2340 return -ENODEV;
2341
2342 if (unlikely(pci_channel_offline(ha->pdev)))
2343 return -ENODEV;
2344
2345 if (qla2x00_reset_active(vha))
2346 return -EBUSY;
2347
2348 if (!IS_FWI2_CAPABLE(ha))
2349 return -EPERM;
2350
2351 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2352 GFP_KERNEL);
2353 if (!stats) {
2354 ql_log(ql_log_warn, vha, 0x70e2,
2355 "Failed to allocate memory for stats.\n");
2356 return -ENOMEM;
2357 }
2358
2359 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2360
2361 if (rval == QLA_SUCCESS) {
2362 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2363 stats, sizeof(*stats));
2364 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2365 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2366 }
2367
2368 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2369 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2370 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2371
2372 bsg_job->reply_len = sizeof(*bsg_reply);
2373 bsg_reply->result = DID_OK << 16;
2374 bsg_job_done(bsg_job, bsg_reply->result,
2375 bsg_reply->reply_payload_rcv_len);
2376
2377 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2378 stats, stats_dma);
2379
2380 return 0;
2381 }
2382
2383 static int
2384 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2385 {
2386 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2387 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2388 scsi_qla_host_t *vha = shost_priv(host);
2389 int rval;
2390 struct qla_dport_diag *dd;
2391
2392 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2393 !IS_QLA28XX(vha->hw))
2394 return -EPERM;
2395
2396 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2397 if (!dd) {
2398 ql_log(ql_log_warn, vha, 0x70db,
2399 "Failed to allocate memory for dport.\n");
2400 return -ENOMEM;
2401 }
2402
2403 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2404 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2405
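/* Run the D-Port diagnostic with the caller supplied options; results are returned in dd->buf. */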
2406 rval = qla26xx_dport_diagnostics(
2407 vha, dd->buf, sizeof(dd->buf), dd->options);
2408 if (rval == QLA_SUCCESS) {
2409 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2410 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2411 }
2412
2413 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2414 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2415 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2416
2417 bsg_job->reply_len = sizeof(*bsg_reply);
2418 bsg_reply->result = DID_OK << 16;
2419 bsg_job_done(bsg_job, bsg_reply->result,
2420 bsg_reply->reply_payload_rcv_len);
2421
2422 kfree(dd);
2423
2424 return 0;
2425 }
2426
2427 static int
2428 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2429 {
2430 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2431 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2432 struct qla_hw_data *ha = vha->hw;
2433 struct qla_active_regions regions = { };
2434 struct active_regions active_regions = { };
2435
2436 qla27xx_get_active_image(vha, &active_regions);
2437 regions.global_image = active_regions.global;
2438
2439 if (IS_QLA28XX(ha)) {
2440 qla28xx_get_aux_images(vha, &active_regions);
2441 regions.board_config = active_regions.aux.board_config;
2442 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2443 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2444 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2445 }
2446
2447 ql_dbg(ql_dbg_user, vha, 0x70e1,
2448 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2449 __func__, vha->host_no, regions.global_image,
2450 regions.board_config, regions.vpd_nvram,
2451 regions.npiv_config_0_1, regions.npiv_config_2_3);
2452
2453 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2454 bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2455
2456 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2457 bsg_reply->reply_payload_rcv_len = sizeof(regions);
2458 bsg_reply->result = DID_OK << 16;
2459 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2460 bsg_job_done(bsg_job, bsg_reply->result,
2461 bsg_reply->reply_payload_rcv_len);
2462
2463 return 0;
2464 }
2465
2466 static int
2467 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2468 {
2469 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2470 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2471 struct ql_vnd_mng_host_stats_param *req_data;
2472 struct ql_vnd_mng_host_stats_resp rsp_data;
2473 u32 req_data_len;
2474 int ret = 0;
2475
2476 if (!vha->flags.online) {
2477 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2478 return -EIO;
2479 }
2480
2481 req_data_len = bsg_job->request_payload.payload_len;
2482
2483 if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2484 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2485 return -EIO;
2486 }
2487
2488 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2489 if (!req_data) {
2490 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2491 return -ENOMEM;
2492 }
2493
2494 /* Copy the request buffer in req_data */
2495 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2496 bsg_job->request_payload.sg_cnt, req_data,
2497 req_data_len);
2498
2499 switch (req_data->action) {
2500 case QLA_STOP:
2501 ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2502 break;
2503 case QLA_START:
2504 ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2505 break;
2506 case QLA_CLEAR:
2507 ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2508 break;
2509 default:
2510 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2511 ret = -EIO;
2512 break;
2513 }
2514
2515 kfree(req_data);
2516
2517 /* Prepare response */
2518 rsp_data.status = ret;
2519 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2520
2521 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2522 bsg_reply->reply_payload_rcv_len =
2523 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2524 bsg_job->reply_payload.sg_cnt,
2525 &rsp_data,
2526 sizeof(struct ql_vnd_mng_host_stats_resp));
2527
2528 bsg_reply->result = DID_OK;
2529 bsg_job_done(bsg_job, bsg_reply->result,
2530 bsg_reply->reply_payload_rcv_len);
2531
2532 return ret;
2533 }
2534
2535 static int
2536 qla2x00_get_host_stats(struct bsg_job *bsg_job)
2537 {
2538 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2539 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2540 struct ql_vnd_stats_param *req_data;
2541 struct ql_vnd_host_stats_resp rsp_data;
2542 u32 req_data_len;
2543 int ret = 0;
2544 u64 ini_entry_count = 0;
2545 u64 entry_count = 0;
2546 u64 tgt_num = 0;
2547 u64 tmp_stat_type = 0;
2548 u64 response_len = 0;
2549 void *data;
2550
2551 req_data_len = bsg_job->request_payload.payload_len;
2552
2553 if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
2554 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2555 return -EIO;
2556 }
2557
2558 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2559 if (!req_data) {
2560 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2561 return -ENOMEM;
2562 }
2563
2564 /* Copy the request buffer in req_data */
2565 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2566 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2567
2568 /* Copy stat type to work on it */
2569 tmp_stat_type = req_data->stat_type;
2570
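/*
 * The target link-down stat is reported per target port: account for one
 * entry per discovered target, then clear the bit so only the remaining
 * initiator stat bits are counted below.
 */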
2571 if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
2572 /* Num of tgts connected to this host */
2573 tgt_num = qla2x00_get_num_tgts(vha);
2574 /* unset BIT_17 */
2575 tmp_stat_type &= ~(1 << 17);
2576 }
2577
2578 /* Total ini stats */
2579 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
2580
2581 /* Total number of entries */
2582 entry_count = ini_entry_count + tgt_num;
2583
2584 response_len = sizeof(struct ql_vnd_host_stats_resp) +
2585 (sizeof(struct ql_vnd_stat_entry) * entry_count);
2586
2587 if (response_len > bsg_job->reply_payload.payload_len) {
2588 rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
2589 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2590 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2591
2592 bsg_reply->reply_payload_rcv_len =
2593 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2594 bsg_job->reply_payload.sg_cnt, &rsp_data,
2595 sizeof(struct ql_vnd_mng_host_stats_resp));
2596
2597 bsg_reply->result = DID_OK;
2598 bsg_job_done(bsg_job, bsg_reply->result,
2599 bsg_reply->reply_payload_rcv_len);
2600 goto host_stat_out;
2601 }
2602
2603 data = kzalloc(response_len, GFP_KERNEL);
2604 if (!data) {
2605 ret = -ENOMEM;
2606 goto host_stat_out;
2607 }
2608
2609 ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2610 data, response_len);
2611
2612 rsp_data.status = EXT_STATUS_OK;
2613 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2614
2615 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2616 bsg_job->reply_payload.sg_cnt,
2617 data, response_len);
2618 bsg_reply->result = DID_OK;
2619 bsg_job_done(bsg_job, bsg_reply->result,
2620 bsg_reply->reply_payload_rcv_len);
2621
2622 kfree(data);
2623 host_stat_out:
2624 kfree(req_data);
2625 return ret;
2626 }
2627
2628 static struct fc_rport *
2629 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2630 {
2631 fc_port_t *fcport = NULL;
2632
2633 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2634 if (fcport->rport->number == tgt_num)
2635 return fcport->rport;
2636 }
2637 return NULL;
2638 }
2639
2640 static int
2641 qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
2642 {
2643 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2644 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2645 struct ql_vnd_tgt_stats_param *req_data;
2646 u32 req_data_len;
2647 int ret = 0;
2648 u64 response_len = 0;
2649 struct ql_vnd_tgt_stats_resp *data = NULL;
2650 struct fc_rport *rport = NULL;
2651
2652 if (!vha->flags.online) {
2653 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2654 return -EIO;
2655 }
2656
2657 req_data_len = bsg_job->request_payload.payload_len;
2658
2659 if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
2660 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2661 return -EIO;
2662 }
2663
2664 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2665 if (!req_data) {
2666 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2667 return -ENOMEM;
2668 }
2669
2670 /* Copy the request buffer in req_data */
2671 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2672 bsg_job->request_payload.sg_cnt,
2673 req_data, req_data_len);
2674
2675 response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
2676 sizeof(struct ql_vnd_stat_entry);
2677
2678 /* structure + size for one entry */
2679 data = kzalloc(response_len, GFP_KERNEL);
2680 if (!data) {
2681 kfree(req_data);
2682 return -ENOMEM;
2683 }
2684
2685 if (response_len > bsg_job->reply_payload.payload_len) {
2686 data->status = EXT_STATUS_BUFFER_TOO_SMALL;
2687 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2688 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2689
2690 bsg_reply->reply_payload_rcv_len =
2691 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2692 bsg_job->reply_payload.sg_cnt, data,
2693 sizeof(struct ql_vnd_tgt_stats_resp));
2694
2695 bsg_reply->result = DID_OK;
2696 bsg_job_done(bsg_job, bsg_reply->result,
2697 bsg_reply->reply_payload_rcv_len);
2698 goto tgt_stat_out;
2699 }
2700
2701 rport = qla2xxx_find_rport(vha, req_data->tgt_id);
2702 if (!rport) {
2703 ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
2704 ret = EXT_STATUS_INVALID_PARAM;
2705 data->status = EXT_STATUS_INVALID_PARAM;
2706 goto reply;
2707 }
2708
2709 ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2710 rport, (void *)data, response_len);
2711
2712 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2713 reply:
2714 bsg_reply->reply_payload_rcv_len =
2715 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2716 bsg_job->reply_payload.sg_cnt, data,
2717 response_len);
2718 bsg_reply->result = DID_OK;
2719 bsg_job_done(bsg_job, bsg_reply->result,
2720 bsg_reply->reply_payload_rcv_len);
2721
2722 tgt_stat_out:
2723 kfree(data);
2724 kfree(req_data);
2725
2726 return ret;
2727 }
2728
2729 static int
2730 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2731 {
2732 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2733 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2734 struct ql_vnd_mng_host_port_param *req_data;
2735 struct ql_vnd_mng_host_port_resp rsp_data;
2736 u32 req_data_len;
2737 int ret = 0;
2738
2739 req_data_len = bsg_job->request_payload.payload_len;
2740
2741 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2742 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2743 return -EIO;
2744 }
2745
2746 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2747 if (!req_data) {
2748 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2749 return -ENOMEM;
2750 }
2751
2752 /* Copy the request buffer in req_data */
2753 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2754 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2755
2756 switch (req_data->action) {
2757 case QLA_ENABLE:
2758 ret = qla2xxx_enable_port(vha->host);
2759 break;
2760 case QLA_DISABLE:
2761 ret = qla2xxx_disable_port(vha->host);
2762 break;
2763 default:
2764 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2765 ret = -EIO;
2766 break;
2767 }
2768
2769 kfree(req_data);
2770
2771 /* Prepare response */
2772 rsp_data.status = ret;
2773 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2774 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2775
2776 bsg_reply->reply_payload_rcv_len =
2777 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2778 bsg_job->reply_payload.sg_cnt, &rsp_data,
2779 sizeof(struct ql_vnd_mng_host_port_resp));
2780 bsg_reply->result = DID_OK;
2781 bsg_job_done(bsg_job, bsg_reply->result,
2782 bsg_reply->reply_payload_rcv_len);
2783
2784 return ret;
2785 }
2786
2787 static int
2788 qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
2789 {
2790 struct fc_bsg_request *bsg_request = bsg_job->request;
2791
2792 ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
2793 __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
2794
2795 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2796 case QL_VND_LOOPBACK:
2797 return qla2x00_process_loopback(bsg_job);
2798
2799 case QL_VND_A84_RESET:
2800 return qla84xx_reset(bsg_job);
2801
2802 case QL_VND_A84_UPDATE_FW:
2803 return qla84xx_updatefw(bsg_job);
2804
2805 case QL_VND_A84_MGMT_CMD:
2806 return qla84xx_mgmt_cmd(bsg_job);
2807
2808 case QL_VND_IIDMA:
2809 return qla24xx_iidma(bsg_job);
2810
2811 case QL_VND_FCP_PRIO_CFG_CMD:
2812 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2813
2814 case QL_VND_READ_FLASH:
2815 return qla2x00_read_optrom(bsg_job);
2816
2817 case QL_VND_UPDATE_FLASH:
2818 return qla2x00_update_optrom(bsg_job);
2819
2820 case QL_VND_SET_FRU_VERSION:
2821 return qla2x00_update_fru_versions(bsg_job);
2822
2823 case QL_VND_READ_FRU_STATUS:
2824 return qla2x00_read_fru_status(bsg_job);
2825
2826 case QL_VND_WRITE_FRU_STATUS:
2827 return qla2x00_write_fru_status(bsg_job);
2828
2829 case QL_VND_WRITE_I2C:
2830 return qla2x00_write_i2c(bsg_job);
2831
2832 case QL_VND_READ_I2C:
2833 return qla2x00_read_i2c(bsg_job);
2834
2835 case QL_VND_DIAG_IO_CMD:
2836 return qla24xx_process_bidir_cmd(bsg_job);
2837
2838 case QL_VND_FX00_MGMT_CMD:
2839 return qlafx00_mgmt_cmd(bsg_job);
2840
2841 case QL_VND_SERDES_OP:
2842 return qla26xx_serdes_op(bsg_job);
2843
2844 case QL_VND_SERDES_OP_EX:
2845 return qla8044_serdes_op(bsg_job);
2846
2847 case QL_VND_GET_FLASH_UPDATE_CAPS:
2848 return qla27xx_get_flash_upd_cap(bsg_job);
2849
2850 case QL_VND_SET_FLASH_UPDATE_CAPS:
2851 return qla27xx_set_flash_upd_cap(bsg_job);
2852
2853 case QL_VND_GET_BBCR_DATA:
2854 return qla27xx_get_bbcr_data(bsg_job);
2855
2856 case QL_VND_GET_PRIV_STATS:
2857 case QL_VND_GET_PRIV_STATS_EX:
2858 return qla2x00_get_priv_stats(bsg_job);
2859
2860 case QL_VND_DPORT_DIAGNOSTICS:
2861 return qla2x00_do_dport_diagnostics(bsg_job);
2862
2863 case QL_VND_EDIF_MGMT:
2864 return qla_edif_app_mgmt(bsg_job);
2865
2866 case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2867 return qla2x00_get_flash_image_status(bsg_job);
2868
2869 case QL_VND_MANAGE_HOST_STATS:
2870 return qla2x00_manage_host_stats(bsg_job);
2871
2872 case QL_VND_GET_HOST_STATS:
2873 return qla2x00_get_host_stats(bsg_job);
2874
2875 case QL_VND_GET_TGT_STATS:
2876 return qla2x00_get_tgt_stats(bsg_job);
2877
2878 case QL_VND_MANAGE_HOST_PORT:
2879 return qla2x00_manage_host_port(bsg_job);
2880
2881 case QL_VND_MBX_PASSTHRU:
2882 return qla2x00_mailbox_passthru(bsg_job);
2883
2884 default:
2885 return -ENOSYS;
2886 }
2887 }
2888
2889 int
2890 qla24xx_bsg_request(struct bsg_job *bsg_job)
2891 {
2892 struct fc_bsg_request *bsg_request = bsg_job->request;
2893 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2894 int ret = -EINVAL;
2895 struct fc_rport *rport;
2896 struct Scsi_Host *host;
2897 scsi_qla_host_t *vha;
2898
2899 /* In case no data transferred. */
2900 bsg_reply->reply_payload_rcv_len = 0;
2901
2902 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2903 rport = fc_bsg_to_rport(bsg_job);
2904 host = rport_to_shost(rport);
2905 vha = shost_priv(host);
2906 } else {
2907 host = fc_bsg_to_shost(bsg_job);
2908 vha = shost_priv(host);
2909 }
2910
2911 /* Disabling the port brings down the chip; still allow the enable and host stats commands */
2912 if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
2913 bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
2914 goto skip_chip_chk;
2915
2916 if (vha->hw->flags.port_isolated) {
2917 bsg_reply->result = DID_ERROR;
2918 /* operation not permitted */
2919 return -EPERM;
2920 }
2921
2922 if (qla2x00_chip_is_down(vha)) {
2923 ql_dbg(ql_dbg_user, vha, 0x709f,
2924 "BSG: ISP abort active/needed -- cmd=%d.\n",
2925 bsg_request->msgcode);
2926 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
2927 return -EBUSY;
2928 }
2929
2930 if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
2931 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
2932 return -EIO;
2933 }
2934
2935 skip_chip_chk:
2936 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
2937 "Entered %s msgcode=0x%x. bsg ptr %px\n",
2938 __func__, bsg_request->msgcode, bsg_job);
2939
2940 switch (bsg_request->msgcode) {
2941 case FC_BSG_RPT_ELS:
2942 case FC_BSG_HST_ELS_NOLOGIN:
2943 ret = qla2x00_process_els(bsg_job);
2944 break;
2945 case FC_BSG_HST_CT:
2946 ret = qla2x00_process_ct(bsg_job);
2947 break;
2948 case FC_BSG_HST_VENDOR:
2949 ret = qla2x00_process_vendor_specific(vha, bsg_job);
2950 break;
2951 case FC_BSG_HST_ADD_RPORT:
2952 case FC_BSG_HST_DEL_RPORT:
2953 case FC_BSG_RPT_CT:
2954 default:
2955 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2956 break;
2957 }
2958
2959 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
2960 "%s done with return %x\n", __func__, ret);
2961
2962 return ret;
2963 }
2964
2965 int
2966 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2967 {
2968 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2969 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2970 struct qla_hw_data *ha = vha->hw;
2971 srb_t *sp;
2972 int cnt, que;
2973 unsigned long flags;
2974 struct req_que *req;
2975
2976 ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
2977 __func__, bsg_job);
2978
2979 if (qla2x00_isp_reg_stat(ha)) {
2980 ql_log(ql_log_info, vha, 0x9007,
2981 "PCI/Register disconnect.\n");
2982 qla_pci_set_eeh_busy(vha);
2983 }
2984
2985 /* find the bsg job from the active list of commands */
2986 spin_lock_irqsave(&ha->hardware_lock, flags);
2987 for (que = 0; que < ha->max_req_queues; que++) {
2988 req = ha->req_q_map[que];
2989 if (!req)
2990 continue;
2991
2992 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2993 sp = req->outstanding_cmds[cnt];
2994 if (sp &&
2995 (sp->type == SRB_CT_CMD ||
2996 sp->type == SRB_ELS_CMD_HST ||
2997 sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
2998 sp->type == SRB_FXIOCB_BCMD) &&
2999 sp->u.bsg_job == bsg_job) {
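/*
 * Remove the SRB from the outstanding array before dropping the
 * hardware lock so the normal completion path no longer sees it.
 */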
3000 req->outstanding_cmds[cnt] = NULL;
3001 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3002
3003 if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
3004 ql_log(ql_log_warn, vha, 0x7089,
3005 "mbx abort_command failed.\n");
3006 bsg_reply->result = -EIO;
3007 } else {
3008 ql_dbg(ql_dbg_user, vha, 0x708a,
3009 "mbx abort_command success.\n");
3010 bsg_reply->result = 0;
3011 }
3012 spin_lock_irqsave(&ha->hardware_lock, flags);
3013 goto done;
3014
3015 }
3016 }
3017 }
3018 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3019 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
3020 bsg_reply->result = -ENXIO;
3021 return 0;
3022
3023 done:
3024 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3025 /* ref: INIT */
3026 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3027 return 0;
3028 }
3029
3030 int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
3031 {
3032 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3033 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3034 int ret = -EINVAL;
3035 int ptsize = sizeof(struct qla_mbx_passthru);
3036 struct qla_mbx_passthru *req_data = NULL;
3037 uint32_t req_data_len;
3038
3039 req_data_len = bsg_job->request_payload.payload_len;
3040 if (req_data_len != ptsize) {
3041 ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
3042 return -EIO;
3043 }
3044 req_data = kzalloc(ptsize, GFP_KERNEL);
3045 if (!req_data) {
3046 ql_log(ql_log_warn, vha, 0xf0a4,
3047 "req_data memory allocation failure.\n");
3048 return -ENOMEM;
3049 }
3050
3051 /* Copy the request buffer in req_data */
3052 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3053 bsg_job->request_payload.sg_cnt, req_data, ptsize);
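/*
 * Execute the passthrough mailbox command; the full structure, including
 * the mbx_out completion registers, is copied back into the reply payload.
 */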
3054 ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
3055
3056 /* Copy the req_data in request buffer */
3057 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
3058 bsg_job->reply_payload.sg_cnt, req_data, ptsize);
3059
3060 bsg_reply->reply_payload_rcv_len = ptsize;
3061 if (ret == QLA_SUCCESS)
3062 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
3063 else
3064 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
3065
3066 bsg_job->reply_len = sizeof(*bsg_job->reply);
3067 bsg_reply->result = DID_OK << 16;
3068 bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
3069
3070 kfree(req_data);
3071
3072 return ret;
3073 }
3074