1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2009-2015 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * *
10 * This program is free software; you can redistribute it and/or *
11 * modify it under the terms of version 2 of the GNU General *
12 * Public License as published by the Free Software Foundation. *
13 * This program is distributed in the hope that it will be useful. *
14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18 * TO BE LEGALLY INVALID. See the GNU General Public License for *
19 * more details, a copy of which can be found in the file COPYING *
20 * included with this package. *
21 *******************************************************************/
22
23 #include <linux/interrupt.h>
24 #include <linux/mempool.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/list.h>
29 #include <linux/bsg-lib.h>
30 #include <linux/vmalloc.h>
31
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_transport_fc.h>
35 #include <scsi/scsi_bsg_fc.h>
36 #include <scsi/fc/fc_fs.h>
37
38 #include "lpfc_hw4.h"
39 #include "lpfc_hw.h"
40 #include "lpfc_sli.h"
41 #include "lpfc_sli4.h"
42 #include "lpfc_nl.h"
43 #include "lpfc_bsg.h"
44 #include "lpfc_disc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc.h"
47 #include "lpfc_logmsg.h"
48 #include "lpfc_crtn.h"
49 #include "lpfc_debugfs.h"
50 #include "lpfc_vport.h"
51 #include "lpfc_version.h"
52
53 struct lpfc_bsg_event {
54 struct list_head node;
55 struct kref kref;
56 wait_queue_head_t wq;
57
58 /* Event type and waiter identifiers */
59 uint32_t type_mask;
60 uint32_t req_id;
61 uint32_t reg_id;
62
63 /* next two flags are here for the auto-delete logic */
64 unsigned long wait_time_stamp;
65 int waiting;
66
67 /* seen and not seen events */
68 struct list_head events_to_get;
69 struct list_head events_to_see;
70
71 /* driver data associated with the job */
72 void *dd_data;
73 };
74
75 struct lpfc_bsg_iocb {
76 struct lpfc_iocbq *cmdiocbq;
77 struct lpfc_dmabuf *rmp;
78 struct lpfc_nodelist *ndlp;
79 };
80
81 struct lpfc_bsg_mbox {
82 LPFC_MBOXQ_t *pmboxq;
83 MAILBOX_t *mb;
84 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85 uint8_t *ext; /* extended mailbox data */
86 uint32_t mbOffset; /* from app */
87 uint32_t inExtWLen; /* from app */
88 uint32_t outExtWLen; /* from app */
89 };
90
91 #define MENLO_DID 0x0000FC0E
92
93 struct lpfc_bsg_menlo {
94 struct lpfc_iocbq *cmdiocbq;
95 struct lpfc_dmabuf *rmp;
96 };
97
98 #define TYPE_EVT 1
99 #define TYPE_IOCB 2
100 #define TYPE_MBOX 3
101 #define TYPE_MENLO 4
102 struct bsg_job_data {
103 uint32_t type;
104 struct bsg_job *set_job; /* job waiting for this iocb to finish */
105 union {
106 struct lpfc_bsg_event *evt;
107 struct lpfc_bsg_iocb iocb;
108 struct lpfc_bsg_mbox mbox;
109 struct lpfc_bsg_menlo menlo;
110 } context_un;
111 };
112
113 struct event_data {
114 struct list_head node;
115 uint32_t type;
116 uint32_t immed_dat;
117 void *data;
118 uint32_t len;
119 };
120
121 #define BUF_SZ_4K 4096
122 #define SLI_CT_ELX_LOOPBACK 0x10
123
124 enum ELX_LOOPBACK_CMD {
125 ELX_LOOPBACK_XRI_SETUP,
126 ELX_LOOPBACK_DATA,
127 };
128
129 #define ELX_LOOPBACK_HEADER_SZ \
130 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
131
132 struct lpfc_dmabufext {
133 struct lpfc_dmabuf dma;
134 uint32_t size;
135 uint32_t flag;
136 };
137
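/**
 * lpfc_free_bsg_buffers - free a chain of bsg dma buffers
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to the head of an lpfc dma buffer list.
 *
 * Walks the buffer list, returns each buffer to the mbuf pool and frees
 * the list entries, then frees the list head itself.
 **/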
138 static void
139 lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
140 {
141 struct lpfc_dmabuf *mlast, *next_mlast;
142
143 if (mlist) {
144 list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
145 list) {
146 lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
147 list_del(&mlast->list);
148 kfree(mlast);
149 }
150 lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
151 kfree(mlist);
152 }
153 return;
154 }
155
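/**
 * lpfc_alloc_bsg_buffers - allocate dma buffers for a bsg transfer
 * @phba: Pointer to HBA context object.
 * @size: Total number of bytes the buffers must cover.
 * @outbound_buffers: Non-zero if the buffers will carry outbound data.
 * @bpl: Buffer pointer list to receive one BDE per allocated buffer.
 * @bpl_entries: On entry, the number of BDEs available in @bpl; on return,
 *               the number of BDEs actually used.
 *
 * Allocates LPFC_BPL_SIZE dma buffers from the mbuf pool, chains them on a
 * list and describes each one in the supplied BPL. Returns the list head on
 * success or NULL on failure.
 **/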
156 static struct lpfc_dmabuf *
157 lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
158 int outbound_buffers, struct ulp_bde64 *bpl,
159 int *bpl_entries)
160 {
161 struct lpfc_dmabuf *mlist = NULL;
162 struct lpfc_dmabuf *mp;
163 unsigned int bytes_left = size;
164
165 /* Verify we can support the size specified */
166 if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
167 return NULL;
168
169 /* Determine the number of dma buffers to allocate */
170 *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
171 size/LPFC_BPL_SIZE);
172
173 /* Allocate dma buffer and place in BPL passed */
174 while (bytes_left) {
175 /* Allocate dma buffer */
176 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
177 if (!mp) {
178 if (mlist)
179 lpfc_free_bsg_buffers(phba, mlist);
180 return NULL;
181 }
182
183 INIT_LIST_HEAD(&mp->list);
184 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
185
186 if (!mp->virt) {
187 kfree(mp);
188 if (mlist)
189 lpfc_free_bsg_buffers(phba, mlist);
190 return NULL;
191 }
192
193 /* Queue it to a linked list */
194 if (!mlist)
195 mlist = mp;
196 else
197 list_add_tail(&mp->list, &mlist->list);
198
199 /* Add buffer to buffer pointer list */
200 if (outbound_buffers)
201 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
202 else
203 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
204 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
205 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
206 bpl->tus.f.bdeSize = (uint16_t)
207 (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
208 bytes_left);
209 bytes_left -= bpl->tus.f.bdeSize;
210 bpl->tus.w = le32_to_cpu(bpl->tus.w);
211 bpl++;
212 }
213 return mlist;
214 }
215
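/**
 * lpfc_bsg_copy_data - copy data between a bsg payload and dma buffers
 * @dma_buffers: Pointer to the head of the dma buffer list.
 * @bsg_buffers: Pointer to the bsg request or reply payload.
 * @bytes_to_transfer: Number of bytes to copy.
 * @to_buffers: Non-zero to copy from the sg list into the dma buffers,
 *              zero to copy from the dma buffers into the sg list.
 *
 * Uses an atomic sg mapping iterator to copy data between the bsg payload
 * scatter-gather list and the chained dma buffers. Returns the number of
 * bytes copied.
 **/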
216 static unsigned int
217 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
218 struct bsg_buffer *bsg_buffers,
219 unsigned int bytes_to_transfer, int to_buffers)
220 {
221
222 struct lpfc_dmabuf *mp;
223 unsigned int transfer_bytes, bytes_copied = 0;
224 unsigned int sg_offset, dma_offset;
225 unsigned char *dma_address, *sg_address;
226 LIST_HEAD(temp_list);
227 struct sg_mapping_iter miter;
228 unsigned long flags;
229 unsigned int sg_flags = SG_MITER_ATOMIC;
230 bool sg_valid;
231
232 list_splice_init(&dma_buffers->list, &temp_list);
233 list_add(&dma_buffers->list, &temp_list);
234 sg_offset = 0;
235 if (to_buffers)
236 sg_flags |= SG_MITER_FROM_SG;
237 else
238 sg_flags |= SG_MITER_TO_SG;
239 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
240 sg_flags);
241 local_irq_save(flags);
242 sg_valid = sg_miter_next(&miter);
243 list_for_each_entry(mp, &temp_list, list) {
244 dma_offset = 0;
245 while (bytes_to_transfer && sg_valid &&
246 (dma_offset < LPFC_BPL_SIZE)) {
247 dma_address = mp->virt + dma_offset;
248 if (sg_offset) {
249 /* Continue previous partial transfer of sg */
250 sg_address = miter.addr + sg_offset;
251 transfer_bytes = miter.length - sg_offset;
252 } else {
253 sg_address = miter.addr;
254 transfer_bytes = miter.length;
255 }
256 if (bytes_to_transfer < transfer_bytes)
257 transfer_bytes = bytes_to_transfer;
258 if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
259 transfer_bytes = LPFC_BPL_SIZE - dma_offset;
260 if (to_buffers)
261 memcpy(dma_address, sg_address, transfer_bytes);
262 else
263 memcpy(sg_address, dma_address, transfer_bytes);
264 dma_offset += transfer_bytes;
265 sg_offset += transfer_bytes;
266 bytes_to_transfer -= transfer_bytes;
267 bytes_copied += transfer_bytes;
268 if (sg_offset >= miter.length) {
269 sg_offset = 0;
270 sg_valid = sg_miter_next(&miter);
271 }
272 }
273 }
274 sg_miter_stop(&miter);
275 local_irq_restore(flags);
276 list_del_init(&dma_buffers->list);
277 list_splice(&temp_list, &dma_buffers->list);
278 return bytes_copied;
279 }
280
281 /**
282 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
283 * @phba: Pointer to HBA context object.
284 * @cmdiocbq: Pointer to command iocb.
285 * @rspiocbq: Pointer to response iocb.
286 *
287 * This function is the completion handler for iocbs issued using
288 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
289 * ring event handler function without any lock held. This function
290 * can be called from both worker thread context and interrupt
291 * context. This function also can be called from another thread which
292 * cleans up the SLI layer objects.
293 * This function copies the contents of the response iocb to the
294 * response iocb memory object provided by the caller of
295 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
296 * sleeps for the iocb completion.
297 **/
298 static void
299 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
300 struct lpfc_iocbq *cmdiocbq,
301 struct lpfc_iocbq *rspiocbq)
302 {
303 struct bsg_job_data *dd_data;
304 struct bsg_job *job;
305 struct fc_bsg_reply *bsg_reply;
306 struct lpfc_dmabuf *bmp, *cmp, *rmp;
307 struct lpfc_nodelist *ndlp;
308 struct lpfc_bsg_iocb *iocb;
309 unsigned long flags;
310 int rc = 0;
311 u32 ulp_status, ulp_word4, total_data_placed;
312
313 dd_data = cmdiocbq->context_un.dd_data;
314
315 /* Determine if job has been aborted */
316 spin_lock_irqsave(&phba->ct_ev_lock, flags);
317 job = dd_data->set_job;
318 if (job) {
319 bsg_reply = job->reply;
320 /* Prevent timeout handling from trying to abort job */
321 job->dd_data = NULL;
322 }
323 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
324
325 /* Close the timeout handler abort window */
326 spin_lock_irqsave(&phba->hbalock, flags);
327 cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
328 spin_unlock_irqrestore(&phba->hbalock, flags);
329
330 iocb = &dd_data->context_un.iocb;
331 ndlp = iocb->cmdiocbq->ndlp;
332 rmp = iocb->rmp;
333 cmp = cmdiocbq->cmd_dmabuf;
334 bmp = cmdiocbq->bpl_dmabuf;
335 ulp_status = get_job_ulpstatus(phba, rspiocbq);
336 ulp_word4 = get_job_word4(phba, rspiocbq);
337 total_data_placed = get_job_data_placed(phba, rspiocbq);
338
339 /* Copy the completed data or set the error status */
340
341 if (job) {
342 if (ulp_status) {
343 if (ulp_status == IOSTAT_LOCAL_REJECT) {
344 switch (ulp_word4 & IOERR_PARAM_MASK) {
345 case IOERR_SEQUENCE_TIMEOUT:
346 rc = -ETIMEDOUT;
347 break;
348 case IOERR_INVALID_RPI:
349 rc = -EFAULT;
350 break;
351 default:
352 rc = -EACCES;
353 break;
354 }
355 } else {
356 rc = -EACCES;
357 }
358 } else {
359 bsg_reply->reply_payload_rcv_len =
360 lpfc_bsg_copy_data(rmp, &job->reply_payload,
361 total_data_placed, 0);
362 }
363 }
364
365 lpfc_free_bsg_buffers(phba, cmp);
366 lpfc_free_bsg_buffers(phba, rmp);
367 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
368 kfree(bmp);
369 lpfc_nlp_put(ndlp);
370 lpfc_sli_release_iocbq(phba, cmdiocbq);
371 kfree(dd_data);
372
373 /* Complete the job if the job is still active */
374
375 if (job) {
376 bsg_reply->result = rc;
377 bsg_job_done(job, bsg_reply->result,
378 bsg_reply->reply_payload_rcv_len);
379 }
380 return;
381 }
382
383 /**
384 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
385 * @job: fc_bsg_job to handle
386 **/
387 static int
388 lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
389 {
390 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
391 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
392 struct lpfc_hba *phba = vport->phba;
393 struct lpfc_nodelist *ndlp = rdata->pnode;
394 struct fc_bsg_reply *bsg_reply = job->reply;
395 struct ulp_bde64 *bpl = NULL;
396 struct lpfc_iocbq *cmdiocbq = NULL;
397 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
398 int request_nseg, reply_nseg;
399 u32 num_entry;
400 struct bsg_job_data *dd_data;
401 unsigned long flags;
402 uint32_t creg_val;
403 int rc = 0;
404 int iocb_stat;
405 u16 ulp_context;
406
407 /* in case no data is transferred */
408 bsg_reply->reply_payload_rcv_len = 0;
409
410 if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
411 return -ENODEV;
412
413 /* allocate our bsg tracking structure */
414 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
415 if (!dd_data) {
416 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
417 "2733 Failed allocation of dd_data\n");
418 rc = -ENOMEM;
419 goto no_dd_data;
420 }
421
422 cmdiocbq = lpfc_sli_get_iocbq(phba);
423 if (!cmdiocbq) {
424 rc = -ENOMEM;
425 goto free_dd;
426 }
427
428 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
429 if (!bmp) {
430 rc = -ENOMEM;
431 goto free_cmdiocbq;
432 }
433 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
434 if (!bmp->virt) {
435 rc = -ENOMEM;
436 goto free_bmp;
437 }
438
439 INIT_LIST_HEAD(&bmp->list);
440
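/* Build the BPL: request BDEs are filled in first, then reply BDEs */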
441 bpl = (struct ulp_bde64 *) bmp->virt;
442 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
443 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
444 1, bpl, &request_nseg);
445 if (!cmp) {
446 rc = -ENOMEM;
447 goto free_bmp;
448 }
449 lpfc_bsg_copy_data(cmp, &job->request_payload,
450 job->request_payload.payload_len, 1);
451
452 bpl += request_nseg;
453 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
454 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
455 bpl, &reply_nseg);
456 if (!rmp) {
457 rc = -ENOMEM;
458 goto free_cmp;
459 }
460
461 num_entry = request_nseg + reply_nseg;
462
463 if (phba->sli_rev == LPFC_SLI_REV4)
464 ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
465 else
466 ulp_context = ndlp->nlp_rpi;
467
468 lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
469 phba->fc_ratov * 2);
470
471 cmdiocbq->num_bdes = num_entry;
472 cmdiocbq->vport = phba->pport;
473 cmdiocbq->cmd_dmabuf = cmp;
474 cmdiocbq->bpl_dmabuf = bmp;
475 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
476
477 cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
478 cmdiocbq->context_un.dd_data = dd_data;
479
480 dd_data->type = TYPE_IOCB;
481 dd_data->set_job = job;
482 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
483 dd_data->context_un.iocb.rmp = rmp;
484 job->dd_data = dd_data;
485
486 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
487 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
488 rc = -EIO;
489 goto free_rmp;
490 }
491 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
492 writel(creg_val, phba->HCregaddr);
493 readl(phba->HCregaddr); /* flush */
494 }
495
496 cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
497 if (!cmdiocbq->ndlp) {
498 rc = -ENODEV;
499 goto free_rmp;
500 }
501
502 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
503 if (iocb_stat == IOCB_SUCCESS) {
504 spin_lock_irqsave(&phba->hbalock, flags);
505 /* make sure the I/O had not been completed yet */
506 if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
507 /* open up abort window to timeout handler */
508 cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
509 }
510 spin_unlock_irqrestore(&phba->hbalock, flags);
511 return 0; /* done for now */
512 } else if (iocb_stat == IOCB_BUSY) {
513 rc = -EAGAIN;
514 } else {
515 rc = -EIO;
516 }
517
518 /* iocb failed so cleanup */
519 lpfc_nlp_put(ndlp);
520
521 free_rmp:
522 lpfc_free_bsg_buffers(phba, rmp);
523 free_cmp:
524 lpfc_free_bsg_buffers(phba, cmp);
525 free_bmp:
526 if (bmp->virt)
527 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
528 kfree(bmp);
529 free_cmdiocbq:
530 lpfc_sli_release_iocbq(phba, cmdiocbq);
531 free_dd:
532 kfree(dd_data);
533 no_dd_data:
534 /* make error code available to userspace */
535 bsg_reply->result = rc;
536 job->dd_data = NULL;
537 return rc;
538 }
539
540 /**
541 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
542 * @phba: Pointer to HBA context object.
543 * @cmdiocbq: Pointer to command iocb.
544 * @rspiocbq: Pointer to response iocb.
545 *
546 * This function is the completion handler for iocbs issued using
547 * lpfc_bsg_rport_els function. This function is called by the
548 * ring event handler function without any lock held. This function
549 * can be called from both worker thread context and interrupt
550 * context. This function also can be called from another thread which
551 * cleans up the SLI layer objects.
552 * This function copies the contents of the response iocb to the
553 * response iocb memory object provided by the caller of
554 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
555 * sleeps for the iocb completion.
556 **/
557 static void
558 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
559 struct lpfc_iocbq *cmdiocbq,
560 struct lpfc_iocbq *rspiocbq)
561 {
562 struct bsg_job_data *dd_data;
563 struct bsg_job *job;
564 struct fc_bsg_reply *bsg_reply;
565 struct lpfc_nodelist *ndlp;
566 struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
567 struct fc_bsg_ctels_reply *els_reply;
568 uint8_t *rjt_data;
569 unsigned long flags;
570 unsigned int rsp_size;
571 int rc = 0;
572 u32 ulp_status, ulp_word4, total_data_placed;
573
574 dd_data = cmdiocbq->context_un.dd_data;
575 ndlp = dd_data->context_un.iocb.ndlp;
576 cmdiocbq->ndlp = ndlp;
577
578 /* Determine if job has been aborted */
579 spin_lock_irqsave(&phba->ct_ev_lock, flags);
580 job = dd_data->set_job;
581 if (job) {
582 bsg_reply = job->reply;
583 /* Prevent timeout handling from trying to abort job */
584 job->dd_data = NULL;
585 }
586 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
587
588 /* Close the timeout handler abort window */
589 spin_lock_irqsave(&phba->hbalock, flags);
590 cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
591 spin_unlock_irqrestore(&phba->hbalock, flags);
592
593 ulp_status = get_job_ulpstatus(phba, rspiocbq);
594 ulp_word4 = get_job_word4(phba, rspiocbq);
595 total_data_placed = get_job_data_placed(phba, rspiocbq);
596 pcmd = cmdiocbq->cmd_dmabuf;
597 prsp = (struct lpfc_dmabuf *)pcmd->list.next;
598
599 /* Copy the completed job data or determine the job status if job is
600 * still active
601 */
602
603 if (job) {
604 if (ulp_status == IOSTAT_SUCCESS) {
605 rsp_size = total_data_placed;
606 bsg_reply->reply_payload_rcv_len =
607 sg_copy_from_buffer(job->reply_payload.sg_list,
608 job->reply_payload.sg_cnt,
609 prsp->virt,
610 rsp_size);
611 } else if (ulp_status == IOSTAT_LS_RJT) {
612 bsg_reply->reply_payload_rcv_len =
613 sizeof(struct fc_bsg_ctels_reply);
614 /* LS_RJT data returned in word 4 */
615 rjt_data = (uint8_t *)&ulp_word4;
616 els_reply = &bsg_reply->reply_data.ctels_reply;
617 els_reply->status = FC_CTELS_STATUS_REJECT;
618 els_reply->rjt_data.action = rjt_data[3];
619 els_reply->rjt_data.reason_code = rjt_data[2];
620 els_reply->rjt_data.reason_explanation = rjt_data[1];
621 els_reply->rjt_data.vendor_unique = rjt_data[0];
622 } else if (ulp_status == IOSTAT_LOCAL_REJECT &&
623 (ulp_word4 & IOERR_PARAM_MASK) ==
624 IOERR_SEQUENCE_TIMEOUT) {
625 rc = -ETIMEDOUT;
626 } else {
627 rc = -EIO;
628 }
629 }
630
631 lpfc_els_free_iocb(phba, cmdiocbq);
632
633 lpfc_nlp_put(ndlp);
634 kfree(dd_data);
635
636 /* Complete the job if the job is still active */
637
638 if (job) {
639 bsg_reply->result = rc;
640 bsg_job_done(job, bsg_reply->result,
641 bsg_reply->reply_payload_rcv_len);
642 }
643 return;
644 }
645
646 /**
647 * lpfc_bsg_rport_els - send an ELS command from a bsg request
648 * @job: fc_bsg_job to handle
649 **/
650 static int
651 lpfc_bsg_rport_els(struct bsg_job *job)
652 {
653 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
654 struct lpfc_hba *phba = vport->phba;
655 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
656 struct lpfc_nodelist *ndlp = rdata->pnode;
657 struct fc_bsg_request *bsg_request = job->request;
658 struct fc_bsg_reply *bsg_reply = job->reply;
659 uint32_t elscmd;
660 uint32_t cmdsize;
661 struct lpfc_iocbq *cmdiocbq;
662 uint16_t rpi = 0;
663 struct bsg_job_data *dd_data;
664 unsigned long flags;
665 uint32_t creg_val;
666 int rc = 0;
667
668 /* in case no data is transferred */
669 bsg_reply->reply_payload_rcv_len = 0;
670
671 /* verify the els command is not greater than the
672 * maximum ELS transfer size.
673 */
674
675 if (job->request_payload.payload_len > FCELSSIZE) {
676 rc = -EINVAL;
677 goto no_dd_data;
678 }
679
680 /* allocate our bsg tracking structure */
681 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
682 if (!dd_data) {
683 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
684 "2735 Failed allocation of dd_data\n");
685 rc = -ENOMEM;
686 goto no_dd_data;
687 }
688
689 elscmd = bsg_request->rqst_data.r_els.els_code;
690 cmdsize = job->request_payload.payload_len;
691
692 if (!lpfc_nlp_get(ndlp)) {
693 rc = -ENODEV;
694 goto free_dd_data;
695 }
696
697 /* We will use the dma buffers allocated by the prep els iocb for the
698 * command and response so that, if the job times out and the request is
699 * freed, we won't be doing DMA into memory that is no longer allocated
700 * for the request.
701 */
702 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
703 ndlp->nlp_DID, elscmd);
704 if (!cmdiocbq) {
705 rc = -EIO;
706 goto release_ndlp;
707 }
708
709 /* Transfer the request payload to allocated command dma buffer */
710 sg_copy_to_buffer(job->request_payload.sg_list,
711 job->request_payload.sg_cnt,
712 cmdiocbq->cmd_dmabuf->virt,
713 cmdsize);
714
715 rpi = ndlp->nlp_rpi;
716
717 if (phba->sli_rev == LPFC_SLI_REV4)
718 bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
719 phba->sli4_hba.rpi_ids[rpi]);
720 else
721 cmdiocbq->iocb.ulpContext = rpi;
722 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
723 cmdiocbq->context_un.dd_data = dd_data;
724 cmdiocbq->ndlp = ndlp;
725 cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
726 dd_data->type = TYPE_IOCB;
727 dd_data->set_job = job;
728 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
729 dd_data->context_un.iocb.ndlp = ndlp;
730 dd_data->context_un.iocb.rmp = NULL;
731 job->dd_data = dd_data;
732
733 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
734 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
735 rc = -EIO;
736 goto linkdown_err;
737 }
738 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
739 writel(creg_val, phba->HCregaddr);
740 readl(phba->HCregaddr); /* flush */
741 }
742
743 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
744 if (rc == IOCB_SUCCESS) {
745 spin_lock_irqsave(&phba->hbalock, flags);
746 /* make sure the I/O had not been completed/released */
747 if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
748 /* open up abort window to timeout handler */
749 cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
750 }
751 spin_unlock_irqrestore(&phba->hbalock, flags);
752 return 0; /* done for now */
753 } else if (rc == IOCB_BUSY) {
754 rc = -EAGAIN;
755 } else {
756 rc = -EIO;
757 }
758
759 /* I/O issue failed. Cleanup resources. */
760
761 linkdown_err:
762 lpfc_els_free_iocb(phba, cmdiocbq);
763
764 release_ndlp:
765 lpfc_nlp_put(ndlp);
766
767 free_dd_data:
768 kfree(dd_data);
769
770 no_dd_data:
771 /* make error code available to userspace */
772 bsg_reply->result = rc;
773 job->dd_data = NULL;
774 return rc;
775 }
776
777 /**
778 * lpfc_bsg_event_free - frees an allocated event structure
779 * @kref: Pointer to a kref.
780 *
781 * Called from kref_put. Back cast the kref into an event structure address.
782 * Free any events to get, delete associated nodes, free any events to see,
783 * free any data then free the event itself.
784 **/
785 static void
786 lpfc_bsg_event_free(struct kref *kref)
787 {
788 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
789 kref);
790 struct event_data *ed;
791
792 list_del(&evt->node);
793
794 while (!list_empty(&evt->events_to_get)) {
795 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
796 list_del(&ed->node);
797 kfree(ed->data);
798 kfree(ed);
799 }
800
801 while (!list_empty(&evt->events_to_see)) {
802 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
803 list_del(&ed->node);
804 kfree(ed->data);
805 kfree(ed);
806 }
807
808 kfree(evt->dd_data);
809 kfree(evt);
810 }
811
812 /**
813 * lpfc_bsg_event_ref - increments the kref for an event
814 * @evt: Pointer to an event structure.
815 **/
816 static inline void
817 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
818 {
819 kref_get(&evt->kref);
820 }
821
822 /**
823 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
824 * @evt: Pointer to an event structure.
825 **/
826 static inline void
827 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
828 {
829 kref_put(&evt->kref, lpfc_bsg_event_free);
830 }
831
832 /**
833 * lpfc_bsg_event_new - allocate and initialize an event structure
834 * @ev_mask: Mask of events.
835 * @ev_reg_id: Event reg id.
836 * @ev_req_id: Event request id.
837 **/
838 static struct lpfc_bsg_event *
839 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
840 {
841 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
842
843 if (!evt)
844 return NULL;
845
846 INIT_LIST_HEAD(&evt->events_to_get);
847 INIT_LIST_HEAD(&evt->events_to_see);
848 evt->type_mask = ev_mask;
849 evt->req_id = ev_req_id;
850 evt->reg_id = ev_reg_id;
851 evt->wait_time_stamp = jiffies;
852 evt->dd_data = NULL;
853 init_waitqueue_head(&evt->wq);
854 kref_init(&evt->kref);
855 return evt;
856 }
857
858 /**
859 * diag_cmd_data_free - Frees an lpfc dma buffer extension
860 * @phba: Pointer to HBA context object.
861 * @mlist: Pointer to an lpfc dma buffer extension.
862 **/
863 static int
864 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
865 {
866 struct lpfc_dmabufext *mlast;
867 struct pci_dev *pcidev;
868 struct list_head head, *curr, *next;
869
870 if ((!mlist) || (!lpfc_is_link_up(phba) &&
871 (phba->link_flag & LS_LOOPBACK_MODE))) {
872 return 0;
873 }
874
875 pcidev = phba->pcidev;
876 list_add_tail(&head, &mlist->dma.list);
877
878 list_for_each_safe(curr, next, &head) {
879 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
880 if (mlast->dma.virt)
881 dma_free_coherent(&pcidev->dev,
882 mlast->size,
883 mlast->dma.virt,
884 mlast->dma.phys);
885 kfree(mlast);
886 }
887 return 0;
888 }
889
890 /**
891 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
892 *
893 * This function is called when an unsolicited CT command is received. It
894 * forwards the event to any processes registered to receive CT events.
895 **/
896 int
897 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
898 struct lpfc_iocbq *piocbq)
899 {
900 uint32_t evt_req_id = 0;
901 uint32_t cmd;
902 struct lpfc_dmabuf *dmabuf = NULL;
903 struct lpfc_bsg_event *evt;
904 struct event_data *evt_dat = NULL;
905 struct lpfc_iocbq *iocbq;
906 IOCB_t *iocb = NULL;
907 size_t offset = 0;
908 struct list_head head;
909 struct ulp_bde64 *bde;
910 dma_addr_t dma_addr;
911 int i;
912 struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
913 struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
914 struct lpfc_sli_ct_request *ct_req;
915 struct bsg_job *job = NULL;
916 struct fc_bsg_reply *bsg_reply;
917 struct bsg_job_data *dd_data = NULL;
918 unsigned long flags;
919 int size = 0;
920 u32 bde_count = 0;
921
922 INIT_LIST_HEAD(&head);
923 list_add_tail(&head, &piocbq->list);
924
925 ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
926 evt_req_id = ct_req->FsType;
927 cmd = ct_req->CommandResponse.bits.CmdRsp;
928
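/* Deliver this event to each waiter registered for CT events with a matching request id */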
929 spin_lock_irqsave(&phba->ct_ev_lock, flags);
930 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
931 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
932 evt->req_id != evt_req_id)
933 continue;
934
935 lpfc_bsg_event_ref(evt);
936 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
937 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
938 if (evt_dat == NULL) {
939 spin_lock_irqsave(&phba->ct_ev_lock, flags);
940 lpfc_bsg_event_unref(evt);
941 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
942 "2614 Memory allocation failed for "
943 "CT event\n");
944 break;
945 }
946
947 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
948 /* take accumulated byte count from the last iocbq */
949 iocbq = list_entry(head.prev, typeof(*iocbq), list);
950 if (phba->sli_rev == LPFC_SLI_REV4)
951 evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
952 else
953 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
954 } else {
955 list_for_each_entry(iocbq, &head, list) {
956 iocb = &iocbq->iocb;
957 for (i = 0; i < iocb->ulpBdeCount;
958 i++)
959 evt_dat->len +=
960 iocb->un.cont64[i].tus.f.bdeSize;
961 }
962 }
963
964 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
965 if (evt_dat->data == NULL) {
966 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
967 "2615 Memory allocation failed for "
968 "CT event data, size %d\n",
969 evt_dat->len);
970 kfree(evt_dat);
971 spin_lock_irqsave(&phba->ct_ev_lock, flags);
972 lpfc_bsg_event_unref(evt);
973 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
974 goto error_ct_unsol_exit;
975 }
976
977 list_for_each_entry(iocbq, &head, list) {
978 size = 0;
979 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
980 bdeBuf1 = iocbq->cmd_dmabuf;
981 bdeBuf2 = iocbq->bpl_dmabuf;
982 }
983 if (phba->sli_rev == LPFC_SLI_REV4)
984 bde_count = iocbq->wcqe_cmpl.word3;
985 else
986 bde_count = iocbq->iocb.ulpBdeCount;
987 for (i = 0; i < bde_count; i++) {
988 if (phba->sli3_options &
989 LPFC_SLI3_HBQ_ENABLED) {
990 if (i == 0) {
991 size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
992 dmabuf = bdeBuf1;
993 } else if (i == 1) {
994 size = iocbq->unsol_rcv_len;
995 dmabuf = bdeBuf2;
996 }
997 if ((offset + size) > evt_dat->len)
998 size = evt_dat->len - offset;
999 } else {
1000 size = iocbq->iocb.un.cont64[i].
1001 tus.f.bdeSize;
1002 bde = &iocbq->iocb.un.cont64[i];
1003 dma_addr = getPaddr(bde->addrHigh,
1004 bde->addrLow);
1005 dmabuf = lpfc_sli_ringpostbuf_get(phba,
1006 pring, dma_addr);
1007 }
1008 if (!dmabuf) {
1009 lpfc_printf_log(phba, KERN_ERR,
1010 LOG_LIBDFC, "2616 No dmabuf "
1011 "found for iocbq x%px\n",
1012 iocbq);
1013 kfree(evt_dat->data);
1014 kfree(evt_dat);
1015 spin_lock_irqsave(&phba->ct_ev_lock,
1016 flags);
1017 lpfc_bsg_event_unref(evt);
1018 spin_unlock_irqrestore(
1019 &phba->ct_ev_lock, flags);
1020 goto error_ct_unsol_exit;
1021 }
1022 memcpy((char *)(evt_dat->data) + offset,
1023 dmabuf->virt, size);
1024 offset += size;
1025 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
1026 !(phba->sli3_options &
1027 LPFC_SLI3_HBQ_ENABLED)) {
1028 lpfc_sli_ringpostbuf_put(phba, pring,
1029 dmabuf);
1030 } else {
1031 switch (cmd) {
1032 case ELX_LOOPBACK_DATA:
1033 if (phba->sli_rev <
1034 LPFC_SLI_REV4)
1035 diag_cmd_data_free(phba,
1036 (struct lpfc_dmabufext
1037 *)dmabuf);
1038 break;
1039 case ELX_LOOPBACK_XRI_SETUP:
1040 if ((phba->sli_rev ==
1041 LPFC_SLI_REV2) ||
1042 (phba->sli3_options &
1043 LPFC_SLI3_HBQ_ENABLED
1044 )) {
1045 lpfc_in_buf_free(phba,
1046 dmabuf);
1047 } else {
1048 lpfc_sli3_post_buffer(phba,
1049 pring,
1050 1);
1051 }
1052 break;
1053 default:
1054 if (!(phba->sli3_options &
1055 LPFC_SLI3_HBQ_ENABLED))
1056 lpfc_sli3_post_buffer(phba,
1057 pring,
1058 1);
1059 break;
1060 }
1061 }
1062 }
1063 }
1064
1065 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1066 if (phba->sli_rev == LPFC_SLI_REV4) {
1067 evt_dat->immed_dat = phba->ctx_idx;
1068 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
1069 /* Provide warning for over-run of the ct_ctx array */
1070 if (phba->ct_ctx[evt_dat->immed_dat].valid ==
1071 UNSOL_VALID)
1072 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1073 "2717 CT context array entry "
1074 "[%d] over-run: oxid:x%x, "
1075 "sid:x%x\n", phba->ctx_idx,
1076 phba->ct_ctx[
1077 evt_dat->immed_dat].oxid,
1078 phba->ct_ctx[
1079 evt_dat->immed_dat].SID);
1080 phba->ct_ctx[evt_dat->immed_dat].rxid =
1081 get_job_ulpcontext(phba, piocbq);
1082 phba->ct_ctx[evt_dat->immed_dat].oxid =
1083 get_job_rcvoxid(phba, piocbq);
1084 phba->ct_ctx[evt_dat->immed_dat].SID =
1085 bf_get(wqe_els_did,
1086 &piocbq->wqe.xmit_els_rsp.wqe_dest);
1087 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
1088 } else
1089 evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
1090
1091 evt_dat->type = FC_REG_CT_EVENT;
1092 list_add(&evt_dat->node, &evt->events_to_see);
1093 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
1094 wake_up_interruptible(&evt->wq);
1095 lpfc_bsg_event_unref(evt);
1096 break;
1097 }
1098
1099 list_move(evt->events_to_see.prev, &evt->events_to_get);
1100
1101 dd_data = (struct bsg_job_data *)evt->dd_data;
1102 job = dd_data->set_job;
1103 dd_data->set_job = NULL;
1104 lpfc_bsg_event_unref(evt);
1105 if (job) {
1106 bsg_reply = job->reply;
1107 bsg_reply->reply_payload_rcv_len = size;
1108 /* make error code available to userspace */
1109 bsg_reply->result = 0;
1110 job->dd_data = NULL;
1111 /* complete the job back to userspace */
1112 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1113 bsg_job_done(job, bsg_reply->result,
1114 bsg_reply->reply_payload_rcv_len);
1115 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1116 }
1117 }
1118 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1119
1120 error_ct_unsol_exit:
1121 if (!list_empty(&head))
1122 list_del(&head);
1123 if ((phba->sli_rev < LPFC_SLI_REV4) &&
1124 (evt_req_id == SLI_CT_ELX_LOOPBACK))
1125 return 0;
1126 return 1;
1127 }
1128
1129 /**
1130 * lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
1131 * @phba: Pointer to HBA context object.
1132 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1133 *
1134 * This function handles an abort of a CT command sent toward the
1135 * management plane for an SLI4 port.
1136 *
1137 * If a pending context for a CT command to the management plane is present,
1138 * this function clears that context and returns 1 (handled); otherwise, it
1139 * returns 0, indicating that no such context exists.
1140 **/
1141 int
1142 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1143 {
1144 struct fc_frame_header fc_hdr;
1145 struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1146 int ctx_idx, handled = 0;
1147 uint16_t oxid, rxid;
1148 uint32_t sid;
1149
1150 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1151 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1152 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1153 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1154
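/* Scan the CT context array for a pending exchange matching this abort */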
1155 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1156 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1157 continue;
1158 if (phba->ct_ctx[ctx_idx].rxid != rxid)
1159 continue;
1160 if (phba->ct_ctx[ctx_idx].oxid != oxid)
1161 continue;
1162 if (phba->ct_ctx[ctx_idx].SID != sid)
1163 continue;
1164 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1165 handled = 1;
1166 }
1167 return handled;
1168 }
1169
1170 /**
1171 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1172 * @job: SET_EVENT fc_bsg_job
1173 **/
1174 static int
1175 lpfc_bsg_hba_set_event(struct bsg_job *job)
1176 {
1177 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1178 struct lpfc_hba *phba = vport->phba;
1179 struct fc_bsg_request *bsg_request = job->request;
1180 struct set_ct_event *event_req;
1181 struct lpfc_bsg_event *evt;
1182 int rc = 0;
1183 struct bsg_job_data *dd_data = NULL;
1184 uint32_t ev_mask;
1185 unsigned long flags;
1186
1187 if (job->request_len <
1188 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1189 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1190 "2612 Received SET_CT_EVENT below minimum "
1191 "size\n");
1192 rc = -EINVAL;
1193 goto job_error;
1194 }
1195
1196 event_req = (struct set_ct_event *)
1197 bsg_request->rqst_data.h_vendor.vendor_cmd;
1198 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1199 FC_REG_EVENT_MASK);
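/* Check for an existing event waiter already registered with this reg_id */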
1200 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1201 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1202 if (evt->reg_id == event_req->ev_reg_id) {
1203 lpfc_bsg_event_ref(evt);
1204 evt->wait_time_stamp = jiffies;
1205 dd_data = (struct bsg_job_data *)evt->dd_data;
1206 break;
1207 }
1208 }
1209 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1210
1211 if (&evt->node == &phba->ct_ev_waiters) {
1212 /* no event waiting struct yet - first call */
1213 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1214 if (dd_data == NULL) {
1215 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1216 "2734 Failed allocation of dd_data\n");
1217 rc = -ENOMEM;
1218 goto job_error;
1219 }
1220 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1221 event_req->ev_req_id);
1222 if (!evt) {
1223 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1224 "2617 Failed allocation of event "
1225 "waiter\n");
1226 rc = -ENOMEM;
1227 goto job_error;
1228 }
1229 dd_data->type = TYPE_EVT;
1230 dd_data->set_job = NULL;
1231 dd_data->context_un.evt = evt;
1232 evt->dd_data = (void *)dd_data;
1233 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1234 list_add(&evt->node, &phba->ct_ev_waiters);
1235 lpfc_bsg_event_ref(evt);
1236 evt->wait_time_stamp = jiffies;
1237 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1238 }
1239
1240 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1241 evt->waiting = 1;
1242 dd_data->set_job = job; /* for unsolicited command */
1243 job->dd_data = dd_data; /* for fc transport timeout callback*/
1244 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1245 return 0; /* call job done later */
1246
1247 job_error:
1248 kfree(dd_data);
1249 job->dd_data = NULL;
1250 return rc;
1251 }
1252
1253 /**
1254 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1255 * @job: GET_EVENT fc_bsg_job
1256 **/
1257 static int
1258 lpfc_bsg_hba_get_event(struct bsg_job *job)
1259 {
1260 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1261 struct lpfc_hba *phba = vport->phba;
1262 struct fc_bsg_request *bsg_request = job->request;
1263 struct fc_bsg_reply *bsg_reply = job->reply;
1264 struct get_ct_event *event_req;
1265 struct get_ct_event_reply *event_reply;
1266 struct lpfc_bsg_event *evt, *evt_next;
1267 struct event_data *evt_dat = NULL;
1268 unsigned long flags;
1269 uint32_t rc = 0;
1270
1271 if (job->request_len <
1272 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1273 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1274 "2613 Received GET_CT_EVENT request below "
1275 "minimum size\n");
1276 rc = -EINVAL;
1277 goto job_error;
1278 }
1279
1280 event_req = (struct get_ct_event *)
1281 bsg_request->rqst_data.h_vendor.vendor_cmd;
1282
1283 event_reply = (struct get_ct_event_reply *)
1284 bsg_reply->reply_data.vendor_reply.vendor_rsp;
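/* Find the waiter for this reg_id and dequeue one of its pending events */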
1285 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1286 list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1287 if (evt->reg_id == event_req->ev_reg_id) {
1288 if (list_empty(&evt->events_to_get))
1289 break;
1290 lpfc_bsg_event_ref(evt);
1291 evt->wait_time_stamp = jiffies;
1292 evt_dat = list_entry(evt->events_to_get.prev,
1293 struct event_data, node);
1294 list_del(&evt_dat->node);
1295 break;
1296 }
1297 }
1298 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1299
1300 /* The app may continue to ask for event data until it gets
1301 * an error indicating that there isn't any more
1302 */
1303 if (evt_dat == NULL) {
1304 bsg_reply->reply_payload_rcv_len = 0;
1305 rc = -ENOENT;
1306 goto job_error;
1307 }
1308
1309 if (evt_dat->len > job->request_payload.payload_len) {
1310 evt_dat->len = job->request_payload.payload_len;
1311 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1312 "2618 Truncated event data at %d "
1313 "bytes\n",
1314 job->request_payload.payload_len);
1315 }
1316
1317 event_reply->type = evt_dat->type;
1318 event_reply->immed_data = evt_dat->immed_dat;
1319 if (evt_dat->len > 0)
1320 bsg_reply->reply_payload_rcv_len =
1321 sg_copy_from_buffer(job->request_payload.sg_list,
1322 job->request_payload.sg_cnt,
1323 evt_dat->data, evt_dat->len);
1324 else
1325 bsg_reply->reply_payload_rcv_len = 0;
1326
1327 if (evt_dat) {
1328 kfree(evt_dat->data);
1329 kfree(evt_dat);
1330 }
1331
1332 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1333 lpfc_bsg_event_unref(evt);
1334 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1335 job->dd_data = NULL;
1336 bsg_reply->result = 0;
1337 bsg_job_done(job, bsg_reply->result,
1338 bsg_reply->reply_payload_rcv_len);
1339 return 0;
1340
1341 job_error:
1342 job->dd_data = NULL;
1343 bsg_reply->result = rc;
1344 return rc;
1345 }
1346
1347 /**
1348 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1349 * @phba: Pointer to HBA context object.
1350 * @cmdiocbq: Pointer to command iocb.
1351 * @rspiocbq: Pointer to response iocb.
1352 *
1353 * This function is the completion handler for iocbs issued using
1354 * lpfc_issue_ct_rsp function. This function is called by the
1355 * ring event handler function without any lock held. This function
1356 * can be called from both worker thread context and interrupt
1357 * context. This function also can be called from another thread which
1358 * cleans up the SLI layer objects.
1359 * This function copies the contents of the response iocb to the
1360 * response iocb memory object provided by the caller of
1361 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1362 * sleeps for the iocb completion.
1363 **/
1364 static void
1365 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1366 struct lpfc_iocbq *cmdiocbq,
1367 struct lpfc_iocbq *rspiocbq)
1368 {
1369 struct bsg_job_data *dd_data;
1370 struct bsg_job *job;
1371 struct fc_bsg_reply *bsg_reply;
1372 struct lpfc_dmabuf *bmp, *cmp;
1373 struct lpfc_nodelist *ndlp;
1374 unsigned long flags;
1375 int rc = 0;
1376 u32 ulp_status, ulp_word4;
1377
1378 dd_data = cmdiocbq->context_un.dd_data;
1379
1380 /* Determine if job has been aborted */
1381 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1382 job = dd_data->set_job;
1383 if (job) {
1384 /* Prevent timeout handling from trying to abort job */
1385 job->dd_data = NULL;
1386 }
1387 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1388
1389 /* Close the timeout handler abort window */
1390 spin_lock_irqsave(&phba->hbalock, flags);
1391 cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1392 spin_unlock_irqrestore(&phba->hbalock, flags);
1393
1394 ndlp = dd_data->context_un.iocb.ndlp;
1395 cmp = cmdiocbq->cmd_dmabuf;
1396 bmp = cmdiocbq->bpl_dmabuf;
1397
1398 ulp_status = get_job_ulpstatus(phba, rspiocbq);
1399 ulp_word4 = get_job_word4(phba, rspiocbq);
1400
1401 /* Copy the completed job data or set the error status */
1402
1403 if (job) {
1404 bsg_reply = job->reply;
1405 if (ulp_status) {
1406 if (ulp_status == IOSTAT_LOCAL_REJECT) {
1407 switch (ulp_word4 & IOERR_PARAM_MASK) {
1408 case IOERR_SEQUENCE_TIMEOUT:
1409 rc = -ETIMEDOUT;
1410 break;
1411 case IOERR_INVALID_RPI:
1412 rc = -EFAULT;
1413 break;
1414 default:
1415 rc = -EACCES;
1416 break;
1417 }
1418 } else {
1419 rc = -EACCES;
1420 }
1421 } else {
1422 bsg_reply->reply_payload_rcv_len = 0;
1423 }
1424 }
1425
1426 lpfc_free_bsg_buffers(phba, cmp);
1427 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1428 kfree(bmp);
1429 lpfc_sli_release_iocbq(phba, cmdiocbq);
1430 lpfc_nlp_put(ndlp);
1431 kfree(dd_data);
1432
1433 /* Complete the job if the job is still active */
1434
1435 if (job) {
1436 bsg_reply->result = rc;
1437 bsg_job_done(job, bsg_reply->result,
1438 bsg_reply->reply_payload_rcv_len);
1439 }
1440 return;
1441 }
1442
1443 /**
1444 * lpfc_issue_ct_rsp - issue a ct response
1445 * @phba: Pointer to HBA context object.
1446 * @job: Pointer to the job object.
1447 * @tag: tag index value into the ports context exchange array.
1448 * @cmp: Pointer to a cmp dma buffer descriptor.
1449 * @bmp: Pointer to a bmp dma buffer descriptor.
1450 * @num_entry: Number of entries in the bde.
1451 **/
1452 static int
1453 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
1454 struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1455 int num_entry)
1456 {
1457 struct lpfc_iocbq *ctiocb = NULL;
1458 int rc = 0;
1459 struct lpfc_nodelist *ndlp = NULL;
1460 struct bsg_job_data *dd_data;
1461 unsigned long flags;
1462 uint32_t creg_val;
1463 u16 ulp_context, iotag;
1464
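/* Look up the remote node that originated the unsolicited CT exchange */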
1465 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1466 if (!ndlp) {
1467 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1468 "2721 ndlp null for oxid %x SID %x\n",
1469 phba->ct_ctx[tag].rxid,
1470 phba->ct_ctx[tag].SID);
1471 return IOCB_ERROR;
1472 }
1473
1474 /* allocate our bsg tracking structure */
1475 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1476 if (!dd_data) {
1477 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1478 "2736 Failed allocation of dd_data\n");
1479 rc = -ENOMEM;
1480 goto no_dd_data;
1481 }
1482
1483 /* Allocate buffer for command iocb */
1484 ctiocb = lpfc_sli_get_iocbq(phba);
1485 if (!ctiocb) {
1486 rc = -ENOMEM;
1487 goto no_ctiocb;
1488 }
1489
1490 if (phba->sli_rev == LPFC_SLI_REV4) {
1491 /* Do not issue unsol response if oxid not marked as valid */
1492 if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1493 rc = IOCB_ERROR;
1494 goto issue_ct_rsp_exit;
1495 }
1496
1497 lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
1498 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
1499 phba->ct_ctx[tag].oxid, num_entry,
1500 FC_RCTL_DD_SOL_CTL, 1,
1501 CMD_XMIT_SEQUENCE64_WQE);
1502
1503 /* The exchange is done, mark the entry as invalid */
1504 phba->ct_ctx[tag].valid = UNSOL_INVALID;
1505 iotag = get_wqe_reqtag(ctiocb);
1506 } else {
1507 lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
1508 FC_RCTL_DD_SOL_CTL, 1,
1509 CMD_XMIT_SEQUENCE64_CX);
1510 ctiocb->num_bdes = num_entry;
1511 iotag = ctiocb->iocb.ulpIoTag;
1512 }
1513
1514 ulp_context = get_job_ulpcontext(phba, ctiocb);
1515
1516 /* Xmit CT response on exchange <xid> */
1517 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1518 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1519 ulp_context, iotag, tag, phba->link_state);
1520
1521 ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
1522 ctiocb->vport = phba->pport;
1523 ctiocb->context_un.dd_data = dd_data;
1524 ctiocb->cmd_dmabuf = cmp;
1525 ctiocb->bpl_dmabuf = bmp;
1526 ctiocb->ndlp = ndlp;
1527 ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
1528
1529 dd_data->type = TYPE_IOCB;
1530 dd_data->set_job = job;
1531 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1532 dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
1533 if (!dd_data->context_un.iocb.ndlp) {
1534 rc = -IOCB_ERROR;
1535 goto issue_ct_rsp_exit;
1536 }
1537 dd_data->context_un.iocb.rmp = NULL;
1538 job->dd_data = dd_data;
1539
1540 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1541 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1542 rc = -IOCB_ERROR;
1543 goto issue_ct_rsp_exit;
1544 }
1545 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1546 writel(creg_val, phba->HCregaddr);
1547 readl(phba->HCregaddr); /* flush */
1548 }
1549
1550 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1551 if (rc == IOCB_SUCCESS) {
1552 spin_lock_irqsave(&phba->hbalock, flags);
1553 /* make sure the I/O had not been completed/released */
1554 if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
1555 /* open up abort window to timeout handler */
1556 ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
1557 }
1558 spin_unlock_irqrestore(&phba->hbalock, flags);
1559 return 0; /* done for now */
1560 }
1561
1562 /* iocb failed so cleanup */
1563 job->dd_data = NULL;
1564 lpfc_nlp_put(ndlp);
1565
1566 issue_ct_rsp_exit:
1567 lpfc_sli_release_iocbq(phba, ctiocb);
1568 no_ctiocb:
1569 kfree(dd_data);
1570 no_dd_data:
1571 return rc;
1572 }
1573
1574 /**
1575 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1576 * @job: SEND_MGMT_RESP fc_bsg_job
1577 **/
1578 static int
1579 lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
1580 {
1581 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1582 struct lpfc_hba *phba = vport->phba;
1583 struct fc_bsg_request *bsg_request = job->request;
1584 struct fc_bsg_reply *bsg_reply = job->reply;
1585 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1586 bsg_request->rqst_data.h_vendor.vendor_cmd;
1587 struct ulp_bde64 *bpl;
1588 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1589 int bpl_entries;
1590 uint32_t tag = mgmt_resp->tag;
1591 unsigned long reqbfrcnt =
1592 (unsigned long)job->request_payload.payload_len;
1593 int rc = 0;
1594
1595 /* in case no data is transferred */
1596 bsg_reply->reply_payload_rcv_len = 0;
1597
1598 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1599 rc = -ERANGE;
1600 goto send_mgmt_rsp_exit;
1601 }
1602
1603 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1604 if (!bmp) {
1605 rc = -ENOMEM;
1606 goto send_mgmt_rsp_exit;
1607 }
1608
1609 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1610 if (!bmp->virt) {
1611 rc = -ENOMEM;
1612 goto send_mgmt_rsp_free_bmp;
1613 }
1614
1615 INIT_LIST_HEAD(&bmp->list);
1616 bpl = (struct ulp_bde64 *) bmp->virt;
1617 bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1618 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1619 1, bpl, &bpl_entries);
1620 if (!cmp) {
1621 rc = -ENOMEM;
1622 goto send_mgmt_rsp_free_bmp;
1623 }
1624 lpfc_bsg_copy_data(cmp, &job->request_payload,
1625 job->request_payload.payload_len, 1);
1626
1627 rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1628
1629 if (rc == IOCB_SUCCESS)
1630 return 0; /* done for now */
1631
1632 rc = -EACCES;
1633
1634 lpfc_free_bsg_buffers(phba, cmp);
1635
1636 send_mgmt_rsp_free_bmp:
1637 if (bmp->virt)
1638 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1639 kfree(bmp);
1640 send_mgmt_rsp_exit:
1641 /* make error code available to userspace */
1642 bsg_reply->result = rc;
1643 job->dd_data = NULL;
1644 return rc;
1645 }
1646
1647 /**
1648 * lpfc_bsg_diag_mode_enter - prepare the driver for device diag loopback mode
1649 * @phba: Pointer to HBA context object.
1650 *
1651 * This function is responsible for preparing the driver for diag loopback
1652 * on the device.
1653 */
1654 static int
1655 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1656 {
1657 struct lpfc_vport **vports;
1658 struct Scsi_Host *shost;
1659 struct lpfc_sli *psli;
1660 struct lpfc_queue *qp = NULL;
1661 struct lpfc_sli_ring *pring;
1662 int i = 0;
1663
1664 psli = &phba->sli;
1665 if (!psli)
1666 return -ENODEV;
1667
1668
1669 if ((phba->link_state == LPFC_HBA_ERROR) ||
1670 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1671 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1672 return -EACCES;
1673
1674 vports = lpfc_create_vport_work_array(phba);
1675 if (vports) {
1676 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1677 shost = lpfc_shost_from_vport(vports[i]);
1678 scsi_block_requests(shost);
1679 }
1680 lpfc_destroy_vport_work_array(phba, vports);
1681 } else {
1682 shost = lpfc_shost_from_vport(phba->pport);
1683 scsi_block_requests(shost);
1684 }
1685
1686 if (phba->sli_rev != LPFC_SLI_REV4) {
1687 pring = &psli->sli3_ring[LPFC_FCP_RING];
1688 lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
1689 return 0;
1690 }
1691 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1692 pring = qp->pring;
1693 if (!pring || (pring->ringno != LPFC_FCP_RING))
1694 continue;
1695 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1696 &pring->ring_lock))
1697 break;
1698 }
1699 return 0;
1700 }
1701
1702 /**
1703 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1704 * @phba: Pointer to HBA context object.
1705 *
1706 * This function is responsible for the driver's exit processing from
1707 * diag loopback mode on the device.
1708 */
1709 static void
1710 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1711 {
1712 struct Scsi_Host *shost;
1713 struct lpfc_vport **vports;
1714 int i;
1715
1716 vports = lpfc_create_vport_work_array(phba);
1717 if (vports) {
1718 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1719 shost = lpfc_shost_from_vport(vports[i]);
1720 scsi_unblock_requests(shost);
1721 }
1722 lpfc_destroy_vport_work_array(phba, vports);
1723 } else {
1724 shost = lpfc_shost_from_vport(phba->pport);
1725 scsi_unblock_requests(shost);
1726 }
1727 return;
1728 }
1729
1730 /**
1731 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1732 * @phba: Pointer to HBA context object.
1733 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1734 *
1735 * This function is responsible for placing an sli3 port into diagnostic
1736 * loopback mode in order to perform a diagnostic loopback test.
1737 * All new scsi requests are blocked, a small delay is used to allow the
1738 * scsi requests to complete, then the link is brought down. If the link
1739 * is placed in loopback mode then scsi requests are again allowed
1740 * so the scsi mid-layer doesn't give up on the port.
1741 * All of this is done in-line.
1742 */
1743 static int
1744 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
1745 {
1746 struct fc_bsg_request *bsg_request = job->request;
1747 struct fc_bsg_reply *bsg_reply = job->reply;
1748 struct diag_mode_set *loopback_mode;
1749 uint32_t link_flags;
1750 uint32_t timeout;
1751 LPFC_MBOXQ_t *pmboxq = NULL;
1752 int mbxstatus = MBX_SUCCESS;
1753 int i = 0;
1754 int rc = 0;
1755
1756 /* no data to return just the return code */
1757 bsg_reply->reply_payload_rcv_len = 0;
1758
1759 if (job->request_len < sizeof(struct fc_bsg_request) +
1760 sizeof(struct diag_mode_set)) {
1761 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1762 "2738 Received DIAG MODE request size:%d "
1763 "below the minimum size:%d\n",
1764 job->request_len,
1765 (int)(sizeof(struct fc_bsg_request) +
1766 sizeof(struct diag_mode_set)));
1767 rc = -EINVAL;
1768 goto job_error;
1769 }
1770
1771 rc = lpfc_bsg_diag_mode_enter(phba);
1772 if (rc)
1773 goto job_error;
1774
1775 /* bring the link to diagnostic mode */
1776 loopback_mode = (struct diag_mode_set *)
1777 bsg_request->rqst_data.h_vendor.vendor_cmd;
1778 link_flags = loopback_mode->type;
1779 timeout = loopback_mode->timeout * 100;
1780
1781 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1782 if (!pmboxq) {
1783 rc = -ENOMEM;
1784 goto loopback_mode_exit;
1785 }
1786 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1787 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1788 pmboxq->u.mb.mbxOwner = OWN_HOST;
1789
1790 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1791
1792 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1793 /* wait for link down before proceeding */
1794 i = 0;
1795 while (phba->link_state != LPFC_LINK_DOWN) {
1796 if (i++ > timeout) {
1797 rc = -ETIMEDOUT;
1798 goto loopback_mode_exit;
1799 }
1800 msleep(10);
1801 }
1802
1803 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1804 if (link_flags == INTERNAL_LOOP_BACK)
1805 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1806 else
1807 pmboxq->u.mb.un.varInitLnk.link_flags =
1808 FLAGS_TOPOLOGY_MODE_LOOP;
1809
1810 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1811 pmboxq->u.mb.mbxOwner = OWN_HOST;
1812
1813 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1814 LPFC_MBOX_TMO);
1815
1816 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1817 rc = -ENODEV;
1818 else {
1819 spin_lock_irq(&phba->hbalock);
1820 phba->link_flag |= LS_LOOPBACK_MODE;
1821 spin_unlock_irq(&phba->hbalock);
1822 /* wait for the link attention interrupt */
1823 msleep(100);
1824
1825 i = 0;
1826 while (phba->link_state != LPFC_HBA_READY) {
1827 if (i++ > timeout) {
1828 rc = -ETIMEDOUT;
1829 break;
1830 }
1831
1832 msleep(10);
1833 }
1834 }
1835
1836 } else
1837 rc = -ENODEV;
1838
1839 loopback_mode_exit:
1840 lpfc_bsg_diag_mode_exit(phba);
1841
1842 /*
1843 * Let SLI layer release mboxq if mbox command completed after timeout.
1844 */
1845 if (pmboxq && mbxstatus != MBX_TIMEOUT)
1846 mempool_free(pmboxq, phba->mbox_mem_pool);
1847
1848 job_error:
1849 /* make error code available to userspace */
1850 bsg_reply->result = rc;
1851 /* complete the job back to userspace if no error */
1852 if (rc == 0)
1853 bsg_job_done(job, bsg_reply->result,
1854 bsg_reply->reply_payload_rcv_len);
1855 return rc;
1856 }
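
/*
 * Illustrative sketch (not part of the driver): a management application
 * would typically fill in a struct diag_mode_set and pass it as the
 * vendor-specific payload of an FC BSG request to select loopback mode.
 * Only the fields read above are shown; the exact userspace plumbing
 * (bsg node, sg_io_v4 setup) is assumed.
 *
 *	struct diag_mode_set dms;
 *
 *	memset(&dms, 0, sizeof(dms));
 *	dms.type = INTERNAL_LOOP_BACK;	// loopback type requested
 *	dms.timeout = 60;		// seconds to wait for link transitions
 *	// place &dms in bsg_request->rqst_data.h_vendor.vendor_cmd and
 *	// submit with vendor command LPFC_BSG_VENDOR_DIAG_MODE
 */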
1857
1858 /**
1859 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1860 * @phba: Pointer to HBA context object.
1861 * @diag: Flag to set the link to diagnostic or normal operation state.
1862 *
1863 * This function is responsible for issuing a sli4 mailbox command for setting
1864 * link to either diag state or normal operation state.
1865 */
1866 static int
1867 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1868 {
1869 LPFC_MBOXQ_t *pmboxq;
1870 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1871 uint32_t req_len, alloc_len;
1872 int mbxstatus = MBX_SUCCESS, rc;
1873
1874 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1875 if (!pmboxq)
1876 return -ENOMEM;
1877
1878 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1879 sizeof(struct lpfc_sli4_cfg_mhdr));
1880 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1881 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1882 req_len, LPFC_SLI4_MBX_EMBED);
1883 if (alloc_len != req_len) {
1884 rc = -ENOMEM;
1885 goto link_diag_state_set_out;
1886 }
1887 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1888 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1889 diag, phba->sli4_hba.lnk_info.lnk_tp,
1890 phba->sli4_hba.lnk_info.lnk_no);
1891
1892 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1893 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1894 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1895 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1896 phba->sli4_hba.lnk_info.lnk_no);
1897 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1898 phba->sli4_hba.lnk_info.lnk_tp);
1899 if (diag)
1900 bf_set(lpfc_mbx_set_diag_state_diag,
1901 &link_diag_state->u.req, 1);
1902 else
1903 bf_set(lpfc_mbx_set_diag_state_diag,
1904 &link_diag_state->u.req, 0);
1905
1906 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1907
1908 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1909 rc = 0;
1910 else
1911 rc = -ENODEV;
1912
1913 link_diag_state_set_out:
1914 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1915 mempool_free(pmboxq, phba->mbox_mem_pool);
1916
1917 return rc;
1918 }
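
/*
 * Usage note (sketch): callers in this file bracket diagnostic work with
 * this helper, roughly:
 *
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);  // enter diag state
 *	... set up loopback or run the link diag test ...
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);  // back to normal
 *
 * as done by lpfc_sli4_bsg_link_diag_test() below; the loopback path enters
 * the state in lpfc_sli4_bsg_diag_loopback_mode() and leaves it in
 * lpfc_sli4_bsg_diag_mode_end().
 */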
1919
1920 /**
1921 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
1922 * @phba: Pointer to HBA context object.
1923 * @mode: loopback mode to set
1924 * @link_no: link number for loopback mode to set
1925 *
1926 * This function is responsible for issuing a sli4 mailbox command for setting
1927 * up loopback diagnostic for a link.
1928 */
1929 static int
1930 lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
1931 uint32_t link_no)
1932 {
1933 LPFC_MBOXQ_t *pmboxq;
1934 uint32_t req_len, alloc_len;
1935 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1936 int mbxstatus = MBX_SUCCESS, rc = 0;
1937
1938 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1939 if (!pmboxq)
1940 return -ENOMEM;
1941 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1942 sizeof(struct lpfc_sli4_cfg_mhdr));
1943 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1944 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1945 req_len, LPFC_SLI4_MBX_EMBED);
1946 if (alloc_len != req_len) {
1947 mempool_free(pmboxq, phba->mbox_mem_pool);
1948 return -ENOMEM;
1949 }
1950 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1951 bf_set(lpfc_mbx_set_diag_state_link_num,
1952 &link_diag_loopback->u.req, link_no);
1953
1954 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
1955 bf_set(lpfc_mbx_set_diag_state_link_type,
1956 &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
1957 } else {
1958 bf_set(lpfc_mbx_set_diag_state_link_type,
1959 &link_diag_loopback->u.req,
1960 phba->sli4_hba.lnk_info.lnk_tp);
1961 }
1962
1963 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1964 mode);
1965
1966 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1967 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1968 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1969 "3127 Failed setup loopback mode mailbox "
1970 "command, rc:x%x, status:x%x\n", mbxstatus,
1971 pmboxq->u.mb.mbxStatus);
1972 rc = -ENODEV;
1973 }
1974 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1975 mempool_free(pmboxq, phba->mbox_mem_pool);
1976 return rc;
1977 }
1978
1979 /**
1980 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1981 * @phba: Pointer to HBA context object.
1982 *
1983 * This function sets up SLI4 FC port registrations for a diagnostic run, which
1984 * includes all the rpis, the vfi, and also the vpi.
1985 */
1986 static int
1987 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1988 {
1989 int rc;
1990
1991 if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1992 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1993 "3136 Port still had vfi registered: "
1994 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1995 phba->pport->fc_myDID, phba->fcf.fcfi,
1996 phba->sli4_hba.vfi_ids[phba->pport->vfi],
1997 phba->vpi_ids[phba->pport->vpi]);
1998 return -EINVAL;
1999 }
2000 rc = lpfc_issue_reg_vfi(phba->pport);
2001 return rc;
2002 }
2003
2004 /**
2005 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2006 * @phba: Pointer to HBA context object.
2007 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2008 *
2009 * This function is responsible for placing an sli4 port into diagnostic
2010 * loopback mode in order to perform a diagnostic loopback test.
2011 */
2012 static int
2013 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2014 {
2015 struct fc_bsg_request *bsg_request = job->request;
2016 struct fc_bsg_reply *bsg_reply = job->reply;
2017 struct diag_mode_set *loopback_mode;
2018 uint32_t link_flags, timeout, link_no;
2019 int i, rc = 0;
2020
2021 /* no data to return just the return code */
2022 bsg_reply->reply_payload_rcv_len = 0;
2023
2024 if (job->request_len < sizeof(struct fc_bsg_request) +
2025 sizeof(struct diag_mode_set)) {
2026 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2027 "3011 Received DIAG MODE request size:%d "
2028 "below the minimum size:%d\n",
2029 job->request_len,
2030 (int)(sizeof(struct fc_bsg_request) +
2031 sizeof(struct diag_mode_set)));
2032 rc = -EINVAL;
2033 goto job_done;
2034 }
2035
2036 loopback_mode = (struct diag_mode_set *)
2037 bsg_request->rqst_data.h_vendor.vendor_cmd;
2038 link_flags = loopback_mode->type;
2039 timeout = loopback_mode->timeout * 100;
2040
2041 if (loopback_mode->physical_link == -1)
2042 link_no = phba->sli4_hba.lnk_info.lnk_no;
2043 else
2044 link_no = loopback_mode->physical_link;
2045
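/*
 * In the checks below, the low bits of conf_trunk indicate which links are
 * configured in the trunk, while the same bits shifted left by 4 act as
 * "loopback needs to be disabled" flags: they are set when loopback is
 * enabled on a link and cleared again by a DISABLE_LOOP_BACK request.
 */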
2046 if (link_flags == DISABLE_LOOP_BACK) {
2047 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2048 LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2049 link_no);
2050 if (!rc) {
2051 /* Unset the need disable bit */
2052 phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2053 }
2054 goto job_done;
2055 } else {
2056 /* Check if we need to disable the loopback state */
2057 if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2058 rc = -EPERM;
2059 goto job_done;
2060 }
2061 }
2062
2063 rc = lpfc_bsg_diag_mode_enter(phba);
2064 if (rc)
2065 goto job_done;
2066
2067 /* indicate we are in loopback diagnostic mode */
2068 spin_lock_irq(&phba->hbalock);
2069 phba->link_flag |= LS_LOOPBACK_MODE;
2070 spin_unlock_irq(&phba->hbalock);
2071
2072 /* reset port to start from scratch */
2073 rc = lpfc_selective_reset(phba);
2074 if (rc)
2075 goto job_done;
2076
2077 /* bring the link to diagnostic mode */
2078 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2079 "3129 Bring link to diagnostic state.\n");
2080
2081 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2082 if (rc) {
2083 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2084 "3130 Failed to bring link to diagnostic "
2085 "state, rc:x%x\n", rc);
2086 goto loopback_mode_exit;
2087 }
2088
2089 /* wait for link down before proceeding */
2090 i = 0;
2091 while (phba->link_state != LPFC_LINK_DOWN) {
2092 if (i++ > timeout) {
2093 rc = -ETIMEDOUT;
2094 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2095 "3131 Timeout waiting for link to "
2096 "diagnostic mode, timeout:%d ms\n",
2097 timeout * 10);
2098 goto loopback_mode_exit;
2099 }
2100 msleep(10);
2101 }
2102
2103 /* set up loopback mode */
2104 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2105 "3132 Set up loopback mode:x%x\n", link_flags);
2106
2107 switch (link_flags) {
2108 case INTERNAL_LOOP_BACK:
2109 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2110 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2111 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2112 link_no);
2113 } else {
2114 /* Trunk is configured, but link is not in this trunk */
2115 if (phba->sli4_hba.conf_trunk) {
2116 rc = -ELNRNG;
2117 goto loopback_mode_exit;
2118 }
2119
2120 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2121 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2122 link_no);
2123 }
2124
2125 if (!rc) {
2126 /* Set the need disable bit */
2127 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2128 }
2129
2130 break;
2131 case EXTERNAL_LOOP_BACK:
2132 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2133 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2134 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2135 link_no);
2136 } else {
2137 /* Trunk is configured, but link is not in this trunk */
2138 if (phba->sli4_hba.conf_trunk) {
2139 rc = -ELNRNG;
2140 goto loopback_mode_exit;
2141 }
2142
2143 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2144 LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2145 link_no);
2146 }
2147
2148 if (!rc) {
2149 /* Set the need disable bit */
2150 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2151 }
2152
2153 break;
2154 default:
2155 rc = -EINVAL;
2156 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2157 "3141 Loopback mode:x%x not supported\n",
2158 link_flags);
2159 goto loopback_mode_exit;
2160 }
2161
2162 if (!rc) {
2163 /* wait for the link attention interrupt */
2164 msleep(100);
2165 i = 0;
2166 while (phba->link_state < LPFC_LINK_UP) {
2167 if (i++ > timeout) {
2168 rc = -ETIMEDOUT;
2169 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2170 "3137 Timeout waiting for link up "
2171 "in loopback mode, timeout:%d ms\n",
2172 timeout * 10);
2173 break;
2174 }
2175 msleep(10);
2176 }
2177 }
2178
2179 /* port resource registration setup for loopback diagnostic */
2180 if (!rc) {
2181 /* set up a non-zero myDID for loopback test */
2182 phba->pport->fc_myDID = 1;
2183 rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2184 } else
2185 goto loopback_mode_exit;
2186
2187 if (!rc) {
2188 /* wait for the port ready */
2189 msleep(100);
2190 i = 0;
2191 while (phba->link_state != LPFC_HBA_READY) {
2192 if (i++ > timeout) {
2193 rc = -ETIMEDOUT;
2194 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2195 "3133 Timeout waiting for port "
2196 "loopback mode ready, timeout:%d ms\n",
2197 timeout * 10);
2198 break;
2199 }
2200 msleep(10);
2201 }
2202 }
2203
2204 loopback_mode_exit:
2205 /* clear loopback diagnostic mode */
2206 if (rc) {
2207 spin_lock_irq(&phba->hbalock);
2208 phba->link_flag &= ~LS_LOOPBACK_MODE;
2209 spin_unlock_irq(&phba->hbalock);
2210 }
2211 lpfc_bsg_diag_mode_exit(phba);
2212
2213 job_done:
2214 /* make error code available to userspace */
2215 bsg_reply->result = rc;
2216 /* complete the job back to userspace if no error */
2217 if (rc == 0)
2218 bsg_job_done(job, bsg_reply->result,
2219 bsg_reply->reply_payload_rcv_len);
2220 return rc;
2221 }
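
/*
 * Illustrative sketch (not part of the driver): on SLI-4 trunked ports the
 * application can select which physical link to loop back and must later
 * send DISABLE_LOOP_BACK to clear the per-link flag. Field names follow the
 * request parsing above; the submission mechanics are assumed.
 *
 *	struct diag_mode_set dms = { 0 };
 *
 *	dms.type = INTERNAL_LOOP_BACK;	// or EXTERNAL_LOOP_BACK
 *	dms.timeout = 60;
 *	dms.physical_link = 1;		// or -1 for the default link
 *	// submit as LPFC_BSG_VENDOR_DIAG_MODE
 *
 *	dms.type = DISABLE_LOOP_BACK;	// later, to clear the per-link flag
 *	// submit as LPFC_BSG_VENDOR_DIAG_MODE again
 */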
2222
2223 /**
2224 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2225 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2226 *
2227 * This function is responsible for checking and dispatching the bsg diag
2228 * command from the user to the proper driver action routine.
2229 */
2230 static int
2231 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2232 {
2233 struct Scsi_Host *shost;
2234 struct lpfc_vport *vport;
2235 struct lpfc_hba *phba;
2236 int rc;
2237
2238 shost = fc_bsg_to_shost(job);
2239 if (!shost)
2240 return -ENODEV;
2241 vport = shost_priv(shost);
2242 if (!vport)
2243 return -ENODEV;
2244 phba = vport->phba;
2245 if (!phba)
2246 return -ENODEV;
2247
2248 if (phba->sli_rev < LPFC_SLI_REV4)
2249 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2250 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2251 LPFC_SLI_INTF_IF_TYPE_2)
2252 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2253 else
2254 rc = -ENODEV;
2255
2256 return rc;
2257 }
2258
2259 /**
2260 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2261 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2262 *
2263 * This function is responsible for checking and dispatching the bsg diag
2264 * command from the user to the proper driver action routine.
2265 */
2266 static int
2267 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2268 {
2269 struct fc_bsg_request *bsg_request = job->request;
2270 struct fc_bsg_reply *bsg_reply = job->reply;
2271 struct Scsi_Host *shost;
2272 struct lpfc_vport *vport;
2273 struct lpfc_hba *phba;
2274 struct diag_mode_set *loopback_mode_end_cmd;
2275 uint32_t timeout;
2276 int rc, i;
2277
2278 shost = fc_bsg_to_shost(job);
2279 if (!shost)
2280 return -ENODEV;
2281 vport = shost_priv(shost);
2282 if (!vport)
2283 return -ENODEV;
2284 phba = vport->phba;
2285 if (!phba)
2286 return -ENODEV;
2287
2288 if (phba->sli_rev < LPFC_SLI_REV4)
2289 return -ENODEV;
2290 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2291 LPFC_SLI_INTF_IF_TYPE_2)
2292 return -ENODEV;
2293
2294 /* clear loopback diagnostic mode */
2295 spin_lock_irq(&phba->hbalock);
2296 phba->link_flag &= ~LS_LOOPBACK_MODE;
2297 spin_unlock_irq(&phba->hbalock);
2298 loopback_mode_end_cmd = (struct diag_mode_set *)
2299 bsg_request->rqst_data.h_vendor.vendor_cmd;
2300 timeout = loopback_mode_end_cmd->timeout * 100;
2301
2302 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2303 if (rc) {
2304 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2305 "3139 Failed to bring link to diagnostic "
2306 "state, rc:x%x\n", rc);
2307 goto loopback_mode_end_exit;
2308 }
2309
2310 /* wait for link down before proceeding */
2311 i = 0;
2312 while (phba->link_state != LPFC_LINK_DOWN) {
2313 if (i++ > timeout) {
2314 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2315 "3140 Timeout waiting for link to "
2316 "diagnostic mode_end, timeout:%d ms\n",
2317 timeout * 10);
2318 /* there is nothing much we can do here */
2319 break;
2320 }
2321 msleep(10);
2322 }
2323
2324 /* reset port resource registrations */
2325 rc = lpfc_selective_reset(phba);
2326 phba->pport->fc_myDID = 0;
2327
2328 loopback_mode_end_exit:
2329 /* make return code available to userspace */
2330 bsg_reply->result = rc;
2331 /* complete the job back to userspace if no error */
2332 if (rc == 0)
2333 bsg_job_done(job, bsg_reply->result,
2334 bsg_reply->reply_payload_rcv_len);
2335 return rc;
2336 }
2337
2338 /**
2339 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2340 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2341 *
2342 * This function performs an SLI4 diag link test request from the user
2343 * application.
2344 */
2345 static int
2346 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2347 {
2348 struct fc_bsg_request *bsg_request = job->request;
2349 struct fc_bsg_reply *bsg_reply = job->reply;
2350 struct Scsi_Host *shost;
2351 struct lpfc_vport *vport;
2352 struct lpfc_hba *phba;
2353 LPFC_MBOXQ_t *pmboxq;
2354 struct sli4_link_diag *link_diag_test_cmd;
2355 uint32_t req_len, alloc_len;
2356 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2357 union lpfc_sli4_cfg_shdr *shdr;
2358 uint32_t shdr_status, shdr_add_status;
2359 struct diag_status *diag_status_reply;
2360 int mbxstatus, rc = -ENODEV, rc1 = 0;
2361
2362 shost = fc_bsg_to_shost(job);
2363 if (!shost)
2364 goto job_error;
2365
2366 vport = shost_priv(shost);
2367 if (!vport)
2368 goto job_error;
2369
2370 phba = vport->phba;
2371 if (!phba)
2372 goto job_error;
2373
2374
2375 if (phba->sli_rev < LPFC_SLI_REV4)
2376 goto job_error;
2377
2378 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2379 LPFC_SLI_INTF_IF_TYPE_2)
2380 goto job_error;
2381
2382 if (job->request_len < sizeof(struct fc_bsg_request) +
2383 sizeof(struct sli4_link_diag)) {
2384 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2385 "3013 Received LINK DIAG TEST request "
2386 " size:%d below the minimum size:%d\n",
2387 job->request_len,
2388 (int)(sizeof(struct fc_bsg_request) +
2389 sizeof(struct sli4_link_diag)));
2390 rc = -EINVAL;
2391 goto job_error;
2392 }
2393
2394 rc = lpfc_bsg_diag_mode_enter(phba);
2395 if (rc)
2396 goto job_error;
2397
2398 link_diag_test_cmd = (struct sli4_link_diag *)
2399 bsg_request->rqst_data.h_vendor.vendor_cmd;
2400
2401 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2402
2403 if (rc)
2404 goto job_error;
2405
2406 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2407 if (!pmboxq) {
2408 rc = -ENOMEM;
2409 goto link_diag_test_exit;
2410 }
2411
2412 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2413 sizeof(struct lpfc_sli4_cfg_mhdr));
2414 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2415 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2416 req_len, LPFC_SLI4_MBX_EMBED);
2417 if (alloc_len != req_len) {
2418 rc = -ENOMEM;
2419 goto link_diag_test_exit;
2420 }
2421
2422 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2423 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2424 phba->sli4_hba.lnk_info.lnk_no);
2425 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2426 phba->sli4_hba.lnk_info.lnk_tp);
2427 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2428 link_diag_test_cmd->test_id);
2429 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2430 link_diag_test_cmd->loops);
2431 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2432 link_diag_test_cmd->test_version);
2433 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2434 link_diag_test_cmd->error_action);
2435
2436 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2437
2438 shdr = (union lpfc_sli4_cfg_shdr *)
2439 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2440 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2441 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2442 if (shdr_status || shdr_add_status || mbxstatus) {
2443 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2444 "3010 Run link diag test mailbox failed with "
2445 "mbx_status x%x status x%x, add_status x%x\n",
2446 mbxstatus, shdr_status, shdr_add_status);
2447 }
2448
2449 diag_status_reply = (struct diag_status *)
2450 bsg_reply->reply_data.vendor_reply.vendor_rsp;
2451
2452 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2453 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2454 "3012 Received Run link diag test reply "
2455 "below minimum size (%d): reply_len:%d\n",
2456 (int)(sizeof(*bsg_reply) +
2457 sizeof(*diag_status_reply)),
2458 job->reply_len);
2459 rc = -EINVAL;
2460 goto job_error;
2461 }
2462
2463 diag_status_reply->mbox_status = mbxstatus;
2464 diag_status_reply->shdr_status = shdr_status;
2465 diag_status_reply->shdr_add_status = shdr_add_status;
2466
2467 link_diag_test_exit:
2468 rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2469
2470 if (pmboxq)
2471 mempool_free(pmboxq, phba->mbox_mem_pool);
2472
2473 lpfc_bsg_diag_mode_exit(phba);
2474
2475 job_error:
2476 /* make error code available to userspace */
2477 if (rc1 && !rc)
2478 rc = rc1;
2479 bsg_reply->result = rc;
2480 /* complete the job back to userspace if no error */
2481 if (rc == 0)
2482 bsg_job_done(job, bsg_reply->result,
2483 bsg_reply->reply_payload_rcv_len);
2484 return rc;
2485 }
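
/*
 * Illustrative sketch (not part of the driver): the LINK_DIAG_TEST vendor
 * request carries a struct sli4_link_diag and the reply returns a struct
 * diag_status, matching the fields consumed and filled in above. The values
 * shown are placeholders.
 *
 *	struct sli4_link_diag req = { 0 };
 *	struct diag_status *sts;
 *
 *	req.test_id = 1;		// firmware-defined test selector
 *	req.loops = 10;
 *	req.test_version = 0;
 *	req.error_action = 0;
 *	// submit as LPFC_BSG_VENDOR_DIAG_LINK_TEST; on completion read
 *	// sts->mbox_status, sts->shdr_status and sts->shdr_add_status from
 *	// the vendor reply buffer.
 */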
2486
2487 /**
2488 * lpfcdiag_loop_self_reg - obtains a remote port login id
2489 * @phba: Pointer to HBA context object
2490 * @rpi: Pointer to a remote port login id
2491 *
2492 * This function obtains a remote port login id so the diag loopback test
2493 * can send and receive its own unsolicited CT command.
2494 **/
2495 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2496 {
2497 LPFC_MBOXQ_t *mbox;
2498 struct lpfc_dmabuf *dmabuff;
2499 int status;
2500
2501 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2502 if (!mbox)
2503 return -ENOMEM;
2504
2505 if (phba->sli_rev < LPFC_SLI_REV4)
2506 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2507 (uint8_t *)&phba->pport->fc_sparam,
2508 mbox, *rpi);
2509 else {
2510 *rpi = lpfc_sli4_alloc_rpi(phba);
2511 if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2512 mempool_free(mbox, phba->mbox_mem_pool);
2513 return -EBUSY;
2514 }
2515 status = lpfc_reg_rpi(phba, phba->pport->vpi,
2516 phba->pport->fc_myDID,
2517 (uint8_t *)&phba->pport->fc_sparam,
2518 mbox, *rpi);
2519 }
2520
2521 if (status) {
2522 mempool_free(mbox, phba->mbox_mem_pool);
2523 if (phba->sli_rev == LPFC_SLI_REV4)
2524 lpfc_sli4_free_rpi(phba, *rpi);
2525 return -ENOMEM;
2526 }
2527
2528 dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2529 mbox->ctx_buf = NULL;
2530 mbox->ctx_ndlp = NULL;
2531 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2532
2533 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2534 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2535 kfree(dmabuff);
2536 if (status != MBX_TIMEOUT)
2537 mempool_free(mbox, phba->mbox_mem_pool);
2538 if (phba->sli_rev == LPFC_SLI_REV4)
2539 lpfc_sli4_free_rpi(phba, *rpi);
2540 return -ENODEV;
2541 }
2542
2543 if (phba->sli_rev < LPFC_SLI_REV4)
2544 *rpi = mbox->u.mb.un.varWords[0];
2545
2546 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2547 kfree(dmabuff);
2548 mempool_free(mbox, phba->mbox_mem_pool);
2549 return 0;
2550 }
2551
2552 /**
2553 * lpfcdiag_loop_self_unreg - unregs from the rpi
2554 * @phba: Pointer to HBA context object
2555 * @rpi: Remote port login id
2556 *
2557 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2558 **/
2559 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2560 {
2561 LPFC_MBOXQ_t *mbox;
2562 int status;
2563
2564 /* Allocate mboxq structure */
2565 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2566 if (mbox == NULL)
2567 return -ENOMEM;
2568
2569 if (phba->sli_rev < LPFC_SLI_REV4)
2570 lpfc_unreg_login(phba, 0, rpi, mbox);
2571 else
2572 lpfc_unreg_login(phba, phba->pport->vpi,
2573 phba->sli4_hba.rpi_ids[rpi], mbox);
2574
2575 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2576
2577 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2578 if (status != MBX_TIMEOUT)
2579 mempool_free(mbox, phba->mbox_mem_pool);
2580 return -EIO;
2581 }
2582 mempool_free(mbox, phba->mbox_mem_pool);
2583 if (phba->sli_rev == LPFC_SLI_REV4)
2584 lpfc_sli4_free_rpi(phba, rpi);
2585 return 0;
2586 }
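
/*
 * Usage note (sketch): the loopback test path pairs these two helpers,
 * roughly:
 *
 *	rc = lpfcdiag_loop_self_reg(phba, &rpi);
 *	if (rc)
 *		return rc;
 *	... run the CT loopback exchange using rpi ...
 *	lpfcdiag_loop_self_unreg(phba, rpi);
 *
 * as done in lpfc_bsg_diag_loopback_run() below.
 */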
2587
2588 /**
2589 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2590 * @phba: Pointer to HBA context object
2591 * @rpi: Remote port login id
2592 * @txxri: Pointer to transmit exchange id
2593 * @rxxri: Pointer to response exchange id
2594 *
2595 * This function obtains the transmit and receive ids required to send
2596 * an unsolicited CT command with a payload. Special lpfc FsType and CmdRsp
2597 * flags are used so the unsolicited response handler is able to process
2598 * the CT command sent on the same port.
2599 **/
2600 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2601 uint16_t *txxri, uint16_t * rxxri)
2602 {
2603 struct lpfc_bsg_event *evt;
2604 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2605 struct lpfc_dmabuf *dmabuf;
2606 struct ulp_bde64 *bpl = NULL;
2607 struct lpfc_sli_ct_request *ctreq = NULL;
2608 int ret_val = 0;
2609 int time_left;
2610 int iocb_stat = IOCB_SUCCESS;
2611 unsigned long flags;
2612 u32 status;
2613
2614 *txxri = 0;
2615 *rxxri = 0;
2616 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2617 SLI_CT_ELX_LOOPBACK);
2618 if (!evt)
2619 return -ENOMEM;
2620
2621 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2622 list_add(&evt->node, &phba->ct_ev_waiters);
2623 lpfc_bsg_event_ref(evt);
2624 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2625
2626 cmdiocbq = lpfc_sli_get_iocbq(phba);
2627 rspiocbq = lpfc_sli_get_iocbq(phba);
2628
2629 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2630 if (dmabuf) {
2631 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2632 if (dmabuf->virt) {
2633 INIT_LIST_HEAD(&dmabuf->list);
2634 bpl = (struct ulp_bde64 *) dmabuf->virt;
2635 memset(bpl, 0, sizeof(*bpl));
2636 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2637 bpl->addrHigh =
2638 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2639 sizeof(*bpl)));
2640 bpl->addrLow =
2641 le32_to_cpu(putPaddrLow(dmabuf->phys +
2642 sizeof(*bpl)));
2643 bpl->tus.f.bdeFlags = 0;
2644 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2645 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2646 }
2647 }
2648
2649 if (cmdiocbq == NULL || rspiocbq == NULL ||
2650 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2651 dmabuf->virt == NULL) {
2652 ret_val = -ENOMEM;
2653 goto err_get_xri_exit;
2654 }
2655
2656 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2657
2658 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2659 ctreq->RevisionId.bits.InId = 0;
2660 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2661 ctreq->FsSubType = 0;
2662 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2663 ctreq->CommandResponse.bits.Size = 0;
2664
2665 cmdiocbq->bpl_dmabuf = dmabuf;
2666 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
2667 cmdiocbq->vport = phba->pport;
2668 cmdiocbq->cmd_cmpl = NULL;
2669
2670 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
2671 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
2672
2673 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2674 rspiocbq, (phba->fc_ratov * 2)
2675 + LPFC_DRVR_TIMEOUT);
2676
2677 status = get_job_ulpstatus(phba, rspiocbq);
2678 if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
2679 ret_val = -EIO;
2680 goto err_get_xri_exit;
2681 }
2682 *txxri = get_job_ulpcontext(phba, rspiocbq);
2683
2684 evt->waiting = 1;
2685 evt->wait_time_stamp = jiffies;
2686 time_left = wait_event_interruptible_timeout(
2687 evt->wq, !list_empty(&evt->events_to_see),
2688 msecs_to_jiffies(1000 *
2689 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2690 if (list_empty(&evt->events_to_see))
2691 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2692 else {
2693 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2694 list_move(evt->events_to_see.prev, &evt->events_to_get);
2695 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2696 *rxxri = (list_entry(evt->events_to_get.prev,
2697 typeof(struct event_data),
2698 node))->immed_dat;
2699 }
2700 evt->waiting = 0;
2701
2702 err_get_xri_exit:
2703 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2704 lpfc_bsg_event_unref(evt); /* release ref */
2705 lpfc_bsg_event_unref(evt); /* delete */
2706 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2707
2708 if (dmabuf) {
2709 if (dmabuf->virt)
2710 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2711 kfree(dmabuf);
2712 }
2713
2714 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2715 lpfc_sli_release_iocbq(phba, cmdiocbq);
2716 if (rspiocbq)
2717 lpfc_sli_release_iocbq(phba, rspiocbq);
2718 return ret_val;
2719 }
2720
2721 /**
2722 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2723 * @phba: Pointer to HBA context object
2724 *
2725 * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
2726 * returns a pointer to the buffer.
2727 **/
2728 static struct lpfc_dmabuf *
2729 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2730 {
2731 struct lpfc_dmabuf *dmabuf;
2732 struct pci_dev *pcidev = phba->pcidev;
2733
2734 /* allocate dma buffer struct */
2735 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2736 if (!dmabuf)
2737 return NULL;
2738
2739 INIT_LIST_HEAD(&dmabuf->list);
2740
2741 /* now, allocate dma buffer */
2742 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2743 &(dmabuf->phys), GFP_KERNEL);
2744
2745 if (!dmabuf->virt) {
2746 kfree(dmabuf);
2747 return NULL;
2748 }
2749
2750 return dmabuf;
2751 }
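
/*
 * Usage note (sketch): each buffer returned here is BSG_MBOX_SIZE bytes and
 * is released with lpfc_bsg_dma_page_free() (or in bulk with
 * lpfc_bsg_dma_page_list_free()), e.g.
 *
 *	dmabuf = lpfc_bsg_dma_page_alloc(phba);
 *	if (!dmabuf)
 *		return -ENOMEM;
 *	...
 *	lpfc_bsg_dma_page_free(phba, dmabuf);
 */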
2752
2753 /**
2754 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2755 * @phba: Pointer to HBA context object.
2756 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2757 *
2758 * This routine simply frees a dma buffer and its associated buffer
2759 * descriptor referred to by @dmabuf.
2760 **/
2761 static void
2762 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2763 {
2764 struct pci_dev *pcidev = phba->pcidev;
2765
2766 if (!dmabuf)
2767 return;
2768
2769 if (dmabuf->virt)
2770 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2771 dmabuf->virt, dmabuf->phys);
2772 kfree(dmabuf);
2773 return;
2774 }
2775
2776 /**
2777 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2778 * @phba: Pointer to HBA context object.
2779 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2780 *
2781 * This routine simply frees all dma buffers and their associated buffer
2782 * descriptors referred to by @dmabuf_list.
2783 **/
2784 static void
2785 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2786 struct list_head *dmabuf_list)
2787 {
2788 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2789
2790 if (list_empty(dmabuf_list))
2791 return;
2792
2793 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2794 list_del_init(&dmabuf->list);
2795 lpfc_bsg_dma_page_free(phba, dmabuf);
2796 }
2797 return;
2798 }
2799
2800 /**
2801 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2802 * @phba: Pointer to HBA context object
2803 * @bpl: Pointer to 64 bit bde structure
2804 * @size: Number of bytes to process
2805 * @nocopydata: Flag to copy user data into the allocated buffer
2806 *
2807 * This function allocates page size buffers and populates an lpfc_dmabufext.
2808 * If allowed the user data pointed to with indataptr is copied into the kernel
2809 * memory. The chained list of page size buffers is returned.
2810 **/
2811 static struct lpfc_dmabufext *
2812 diag_cmd_data_alloc(struct lpfc_hba *phba,
2813 struct ulp_bde64 *bpl, uint32_t size,
2814 int nocopydata)
2815 {
2816 struct lpfc_dmabufext *mlist = NULL;
2817 struct lpfc_dmabufext *dmp;
2818 int cnt, offset = 0, i = 0;
2819 struct pci_dev *pcidev;
2820
2821 pcidev = phba->pcidev;
2822
2823 while (size) {
2824 /* We get chunks of 4K */
2825 if (size > BUF_SZ_4K)
2826 cnt = BUF_SZ_4K;
2827 else
2828 cnt = size;
2829
2830 /* allocate struct lpfc_dmabufext buffer header */
2831 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2832 if (!dmp)
2833 goto out;
2834
2835 INIT_LIST_HEAD(&dmp->dma.list);
2836
2837 /* Queue it to a linked list */
2838 if (mlist)
2839 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2840 else
2841 mlist = dmp;
2842
2843 /* allocate buffer */
2844 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2845 cnt,
2846 &(dmp->dma.phys),
2847 GFP_KERNEL);
2848
2849 if (!dmp->dma.virt)
2850 goto out;
2851
2852 dmp->size = cnt;
2853
2854 if (nocopydata) {
2855 bpl->tus.f.bdeFlags = 0;
2856 } else {
2857 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2858 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2859 }
2860
2861 /* build buffer ptr list for IOCB */
2862 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2863 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2864 bpl->tus.f.bdeSize = (ushort) cnt;
2865 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2866 bpl++;
2867
2868 i++;
2869 offset += cnt;
2870 size -= cnt;
2871 }
2872
2873 if (mlist) {
2874 mlist->flag = i;
2875 return mlist;
2876 }
2877 out:
2878 diag_cmd_data_free(phba, mlist);
2879 return NULL;
2880 }
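
/*
 * Note: on success the returned list head's 'flag' member holds the number
 * of BDEs written into @bpl; callers read it back as num_bde (see
 * lpfcdiag_sli3_loop_post_rxbufs() and lpfc_bsg_diag_loopback_run()).
 */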
2881
2882 /**
2883 * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2884 * @phba: Pointer to HBA context object
2885 * @rxxri: Receive exchange id
2886 * @len: Number of data bytes
2887 *
2888 * This function allocates and posts a data buffer of sufficient size to receive
2889 * an unsolicited CT command.
2890 **/
2891 static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2892 size_t len)
2893 {
2894 struct lpfc_sli_ring *pring;
2895 struct lpfc_iocbq *cmdiocbq;
2896 IOCB_t *cmd = NULL;
2897 struct list_head head, *curr, *next;
2898 struct lpfc_dmabuf *rxbmp;
2899 struct lpfc_dmabuf *dmp;
2900 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2901 struct ulp_bde64 *rxbpl = NULL;
2902 uint32_t num_bde;
2903 struct lpfc_dmabufext *rxbuffer = NULL;
2904 int ret_val = 0;
2905 int iocb_stat;
2906 int i = 0;
2907
2908 pring = lpfc_phba_elsring(phba);
2909
2910 cmdiocbq = lpfc_sli_get_iocbq(phba);
2911 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2912 if (rxbmp != NULL) {
2913 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2914 if (rxbmp->virt) {
2915 INIT_LIST_HEAD(&rxbmp->list);
2916 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2917 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2918 }
2919 }
2920
2921 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2922 ret_val = -ENOMEM;
2923 goto err_post_rxbufs_exit;
2924 }
2925
2926 /* Queue buffers for the receive exchange */
2927 num_bde = (uint32_t)rxbuffer->flag;
2928 dmp = &rxbuffer->dma;
2929 cmd = &cmdiocbq->iocb;
2930 i = 0;
2931
2932 INIT_LIST_HEAD(&head);
2933 list_add_tail(&head, &dmp->list);
2934 list_for_each_safe(curr, next, &head) {
2935 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2936 list_del(curr);
2937
2938 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2939 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2940 cmd->un.quexri64cx.buff.bde.addrHigh =
2941 putPaddrHigh(mp[i]->phys);
2942 cmd->un.quexri64cx.buff.bde.addrLow =
2943 putPaddrLow(mp[i]->phys);
2944 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2945 ((struct lpfc_dmabufext *)mp[i])->size;
2946 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2947 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2948 cmd->ulpPU = 0;
2949 cmd->ulpLe = 1;
2950 cmd->ulpBdeCount = 1;
2951 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2952
2953 } else {
2954 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2955 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2956 cmd->un.cont64[i].tus.f.bdeSize =
2957 ((struct lpfc_dmabufext *)mp[i])->size;
2958 cmd->ulpBdeCount = ++i;
2959
2960 if ((--num_bde > 0) && (i < 2))
2961 continue;
2962
2963 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2964 cmd->ulpLe = 1;
2965 }
2966
2967 cmd->ulpClass = CLASS3;
2968 cmd->ulpContext = rxxri;
2969
2970 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2971 0);
2972 if (iocb_stat == IOCB_ERROR) {
2973 diag_cmd_data_free(phba,
2974 (struct lpfc_dmabufext *)mp[0]);
2975 if (mp[1])
2976 diag_cmd_data_free(phba,
2977 (struct lpfc_dmabufext *)mp[1]);
2978 dmp = list_entry(next, struct lpfc_dmabuf, list);
2979 ret_val = -EIO;
2980 goto err_post_rxbufs_exit;
2981 }
2982
2983 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2984 if (mp[1]) {
2985 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2986 mp[1] = NULL;
2987 }
2988
2989 /* The iocb was freed by lpfc_sli_issue_iocb */
2990 cmdiocbq = lpfc_sli_get_iocbq(phba);
2991 if (!cmdiocbq) {
2992 dmp = list_entry(next, struct lpfc_dmabuf, list);
2993 ret_val = -EIO;
2994 goto err_post_rxbufs_exit;
2995 }
2996 cmd = &cmdiocbq->iocb;
2997 i = 0;
2998 }
2999 list_del(&head);
3000
3001 err_post_rxbufs_exit:
3002
3003 if (rxbmp) {
3004 if (rxbmp->virt)
3005 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3006 kfree(rxbmp);
3007 }
3008
3009 if (cmdiocbq)
3010 lpfc_sli_release_iocbq(phba, cmdiocbq);
3011 return ret_val;
3012 }
3013
3014 /**
3015 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
3016 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3017 *
3018 * This function receives a user data buffer to be transmitted and received on
3019 * the same port; the link must be up and in loopback mode prior
3020 * to this function being called.
3021 * 1. A kernel buffer is allocated to copy the user data into.
3022 * 2. The port registers with "itself".
3023 * 3. The transmit and receive exchange ids are obtained.
3024 * 4. The receive exchange id is posted.
3025 * 5. A new els loopback event is created.
3026 * 6. The command and response iocbs are allocated.
3027 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
3028 *
3029 * This function is meant to be called n times while the port is in loopback
3030 * so it is the app's responsibility to issue a reset to take the port out
3031 * of loopback mode.
3032 **/
3033 static int
3034 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3035 {
3036 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3037 struct fc_bsg_reply *bsg_reply = job->reply;
3038 struct lpfc_hba *phba = vport->phba;
3039 struct lpfc_bsg_event *evt;
3040 struct event_data *evdat;
3041 struct lpfc_sli *psli = &phba->sli;
3042 uint32_t size;
3043 uint32_t full_size;
3044 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3045 uint16_t rpi = 0;
3046 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3047 union lpfc_wqe128 *cmdwqe, *rspwqe;
3048 struct lpfc_sli_ct_request *ctreq;
3049 struct lpfc_dmabuf *txbmp;
3050 struct ulp_bde64 *txbpl = NULL;
3051 struct lpfc_dmabufext *txbuffer = NULL;
3052 struct list_head head;
3053 struct lpfc_dmabuf *curr;
3054 uint16_t txxri = 0, rxxri;
3055 uint32_t num_bde;
3056 uint8_t *ptr = NULL, *rx_databuf = NULL;
3057 int rc = 0;
3058 int time_left;
3059 int iocb_stat = IOCB_SUCCESS;
3060 unsigned long flags;
3061 void *dataout = NULL;
3062 uint32_t total_mem;
3063
3064 /* in case no data is returned return just the return code */
3065 bsg_reply->reply_payload_rcv_len = 0;
3066
3067 if (job->request_len <
3068 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3069 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3070 "2739 Received DIAG TEST request below minimum "
3071 "size\n");
3072 rc = -EINVAL;
3073 goto loopback_test_exit;
3074 }
3075
3076 if (job->request_payload.payload_len !=
3077 job->reply_payload.payload_len) {
3078 rc = -EINVAL;
3079 goto loopback_test_exit;
3080 }
3081
3082 if ((phba->link_state == LPFC_HBA_ERROR) ||
3083 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3084 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3085 rc = -EACCES;
3086 goto loopback_test_exit;
3087 }
3088
3089 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3090 rc = -EACCES;
3091 goto loopback_test_exit;
3092 }
3093
3094 size = job->request_payload.payload_len;
3095 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3096
3097 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3098 rc = -ERANGE;
3099 goto loopback_test_exit;
3100 }
3101
3102 if (full_size >= BUF_SZ_4K) {
3103 /*
3104 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3105 * then we allocate 64k and re-use that buffer over and over to
3106 * xfer the whole block. This is because the Linux kernel has a
3107 * problem allocating more than 120k of kernel space memory. Saw a
3108 * problem with GET_FCPTARGETMAPPING...
3109 */
3110 if (size <= (64 * 1024))
3111 total_mem = full_size;
3112 else
3113 total_mem = 64 * 1024;
3114 } else
3115 /* Allocate memory for ioctl data */
3116 total_mem = BUF_SZ_4K;
3117
3118 dataout = kmalloc(total_mem, GFP_KERNEL);
3119 if (dataout == NULL) {
3120 rc = -ENOMEM;
3121 goto loopback_test_exit;
3122 }
3123
3124 ptr = dataout;
3125 ptr += ELX_LOOPBACK_HEADER_SZ;
3126 sg_copy_to_buffer(job->request_payload.sg_list,
3127 job->request_payload.sg_cnt,
3128 ptr, size);
3129 rc = lpfcdiag_loop_self_reg(phba, &rpi);
3130 if (rc)
3131 goto loopback_test_exit;
3132
3133 if (phba->sli_rev < LPFC_SLI_REV4) {
3134 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3135 if (rc) {
3136 lpfcdiag_loop_self_unreg(phba, rpi);
3137 goto loopback_test_exit;
3138 }
3139
3140 rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
3141 if (rc) {
3142 lpfcdiag_loop_self_unreg(phba, rpi);
3143 goto loopback_test_exit;
3144 }
3145 }
3146 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3147 SLI_CT_ELX_LOOPBACK);
3148 if (!evt) {
3149 lpfcdiag_loop_self_unreg(phba, rpi);
3150 rc = -ENOMEM;
3151 goto loopback_test_exit;
3152 }
3153
3154 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3155 list_add(&evt->node, &phba->ct_ev_waiters);
3156 lpfc_bsg_event_ref(evt);
3157 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3158
3159 cmdiocbq = lpfc_sli_get_iocbq(phba);
3160 if (phba->sli_rev < LPFC_SLI_REV4)
3161 rspiocbq = lpfc_sli_get_iocbq(phba);
3162 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3163
3164 if (txbmp) {
3165 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3166 if (txbmp->virt) {
3167 INIT_LIST_HEAD(&txbmp->list);
3168 txbpl = (struct ulp_bde64 *) txbmp->virt;
3169 txbuffer = diag_cmd_data_alloc(phba,
3170 txbpl, full_size, 0);
3171 }
3172 }
3173
3174 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3175 rc = -ENOMEM;
3176 goto err_loopback_test_exit;
3177 }
3178 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3179 rc = -ENOMEM;
3180 goto err_loopback_test_exit;
3181 }
3182
3183 cmdwqe = &cmdiocbq->wqe;
3184 memset(cmdwqe, 0, sizeof(union lpfc_wqe));
3185 if (phba->sli_rev < LPFC_SLI_REV4) {
3186 rspwqe = &rspiocbq->wqe;
3187 memset(rspwqe, 0, sizeof(union lpfc_wqe));
3188 }
3189
3190 INIT_LIST_HEAD(&head);
3191 list_add_tail(&head, &txbuffer->dma.list);
3192 list_for_each_entry(curr, &head, list) {
3193 segment_len = ((struct lpfc_dmabufext *)curr)->size;
3194 if (current_offset == 0) {
3195 ctreq = curr->virt;
3196 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3197 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3198 ctreq->RevisionId.bits.InId = 0;
3199 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3200 ctreq->FsSubType = 0;
3201 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3202 ctreq->CommandResponse.bits.Size = size;
3203 segment_offset = ELX_LOOPBACK_HEADER_SZ;
3204 } else
3205 segment_offset = 0;
3206
3207 BUG_ON(segment_offset >= segment_len);
3208 memcpy(curr->virt + segment_offset,
3209 ptr + current_offset,
3210 segment_len - segment_offset);
3211
3212 current_offset += segment_len - segment_offset;
3213 BUG_ON(current_offset > size);
3214 }
3215 list_del(&head);
3216
3217 /* Build the XMIT_SEQUENCE iocb */
3218 num_bde = (uint32_t)txbuffer->flag;
3219
3220 cmdiocbq->num_bdes = num_bde;
3221 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
3222 cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
3223 cmdiocbq->vport = phba->pport;
3224 cmdiocbq->cmd_cmpl = NULL;
3225 cmdiocbq->bpl_dmabuf = txbmp;
3226
3227 if (phba->sli_rev < LPFC_SLI_REV4) {
3228 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
3229 num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
3230 CMD_XMIT_SEQUENCE64_CX);
3231
3232 } else {
3233 lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
3234 phba->sli4_hba.rpi_ids[rpi], 0xffff,
3235 full_size, FC_RCTL_DD_UNSOL_CTL, 1,
3236 CMD_XMIT_SEQUENCE64_WQE);
3237 cmdiocbq->sli4_xritag = NO_XRI;
3238 }
3239
3240 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3241 rspiocbq, (phba->fc_ratov * 2) +
3242 LPFC_DRVR_TIMEOUT);
3243 if (iocb_stat != IOCB_SUCCESS ||
3244 (phba->sli_rev < LPFC_SLI_REV4 &&
3245 (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
3246 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3247 "3126 Failed loopback test issue iocb: "
3248 "iocb_stat:x%x\n", iocb_stat);
3249 rc = -EIO;
3250 goto err_loopback_test_exit;
3251 }
3252
3253 evt->waiting = 1;
3254 time_left = wait_event_interruptible_timeout(
3255 evt->wq, !list_empty(&evt->events_to_see),
3256 msecs_to_jiffies(1000 *
3257 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3258 evt->waiting = 0;
3259 if (list_empty(&evt->events_to_see)) {
3260 rc = (time_left) ? -EINTR : -ETIMEDOUT;
3261 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3262 "3125 Not receiving unsolicited event, "
3263 "rc:x%x\n", rc);
3264 } else {
3265 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3266 list_move(evt->events_to_see.prev, &evt->events_to_get);
3267 evdat = list_entry(evt->events_to_get.prev,
3268 typeof(*evdat), node);
3269 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3270 rx_databuf = evdat->data;
3271 if (evdat->len != full_size) {
3272 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3273 "1603 Loopback test did not receive expected "
3274 "data length. actual length 0x%x expected "
3275 "length 0x%x\n",
3276 evdat->len, full_size);
3277 rc = -EIO;
3278 } else if (rx_databuf == NULL)
3279 rc = -EIO;
3280 else {
3281 rc = IOCB_SUCCESS;
3282 /* skip over elx loopback header */
3283 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3284 bsg_reply->reply_payload_rcv_len =
3285 sg_copy_from_buffer(job->reply_payload.sg_list,
3286 job->reply_payload.sg_cnt,
3287 rx_databuf, size);
3288 bsg_reply->reply_payload_rcv_len = size;
3289 }
3290 }
3291
3292 err_loopback_test_exit:
3293 lpfcdiag_loop_self_unreg(phba, rpi);
3294
3295 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3296 lpfc_bsg_event_unref(evt); /* release ref */
3297 lpfc_bsg_event_unref(evt); /* delete */
3298 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3299
3300 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3301 lpfc_sli_release_iocbq(phba, cmdiocbq);
3302
3303 if (rspiocbq != NULL)
3304 lpfc_sli_release_iocbq(phba, rspiocbq);
3305
3306 if (txbmp != NULL) {
3307 if (txbpl != NULL) {
3308 if (txbuffer != NULL)
3309 diag_cmd_data_free(phba, txbuffer);
3310 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3311 }
3312 kfree(txbmp);
3313 }
3314
3315 loopback_test_exit:
3316 kfree(dataout);
3317 /* make error code available to userspace */
3318 bsg_reply->result = rc;
3319 job->dd_data = NULL;
3320 /* complete the job back to userspace if no error */
3321 if (rc == IOCB_SUCCESS)
3322 bsg_job_done(job, bsg_reply->result,
3323 bsg_reply->reply_payload_rcv_len);
3324 return rc;
3325 }
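
/*
 * Usage note: for LPFC_BSG_VENDOR_DIAG_TEST the request and reply payloads
 * must be the same length (the data is echoed back through the loopback),
 * the length must be non-zero and no more than 80 * 4 KB, and the port must
 * already have been placed in loopback mode by a prior DIAG_MODE request.
 */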
3326
3327 /**
3328 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3329 * @job: GET_DFC_REV fc_bsg_job
3330 **/
3331 static int
3332 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3333 {
3334 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3335 struct fc_bsg_reply *bsg_reply = job->reply;
3336 struct lpfc_hba *phba = vport->phba;
3337 struct get_mgmt_rev_reply *event_reply;
3338 int rc = 0;
3339
3340 if (job->request_len <
3341 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3342 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3343 "2740 Received GET_DFC_REV request below "
3344 "minimum size\n");
3345 rc = -EINVAL;
3346 goto job_error;
3347 }
3348
3349 event_reply = (struct get_mgmt_rev_reply *)
3350 bsg_reply->reply_data.vendor_reply.vendor_rsp;
3351
3352 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3353 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3354 "2741 Received GET_DFC_REV reply below "
3355 "minimum size\n");
3356 rc = -EINVAL;
3357 goto job_error;
3358 }
3359
3360 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3361 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3362 job_error:
3363 bsg_reply->result = rc;
3364 if (rc == 0)
3365 bsg_job_done(job, bsg_reply->result,
3366 bsg_reply->reply_payload_rcv_len);
3367 return rc;
3368 }
3369
3370 /**
3371 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3372 * @phba: Pointer to HBA context object.
3373 * @pmboxq: Pointer to mailbox command.
3374 *
3375 * This is the completion handler function for mailbox commands issued from
3376 * the lpfc_bsg_issue_mbox function. This function is called by the
3377 * mailbox event handler function with no lock held. This function
3378 * will wake up thread waiting on the wait queue pointed by dd_data
3379 * of the mailbox.
3380 **/
3381 static void
3382 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3383 {
3384 struct bsg_job_data *dd_data;
3385 struct fc_bsg_reply *bsg_reply;
3386 struct bsg_job *job;
3387 uint32_t size;
3388 unsigned long flags;
3389 uint8_t *pmb, *pmb_buf;
3390
3391 dd_data = pmboxq->ctx_ndlp;
3392
3393 /*
3394 * The outgoing buffer is readily referred to from the dma buffer;
3395 * we just need to get the header part from the mailboxq structure.
3396 */
3397 pmb = (uint8_t *)&pmboxq->u.mb;
3398 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3399 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3400
3401 /* Determine if job has been aborted */
3402
3403 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3404 job = dd_data->set_job;
3405 if (job) {
3406 /* Prevent timeout handling from trying to abort job */
3407 job->dd_data = NULL;
3408 }
3409 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3410
3411 /* Copy the mailbox data to the job if it is still active */
3412
3413 if (job) {
3414 bsg_reply = job->reply;
3415 size = job->reply_payload.payload_len;
3416 bsg_reply->reply_payload_rcv_len =
3417 sg_copy_from_buffer(job->reply_payload.sg_list,
3418 job->reply_payload.sg_cnt,
3419 pmb_buf, size);
3420 }
3421
3422 dd_data->set_job = NULL;
3423 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3424 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3425 kfree(dd_data);
3426
3427 /* Complete the job if the job is still active */
3428
3429 if (job) {
3430 bsg_reply->result = 0;
3431 bsg_job_done(job, bsg_reply->result,
3432 bsg_reply->reply_payload_rcv_len);
3433 }
3434 return;
3435 }
3436
3437 /**
3438 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3439 * @phba: Pointer to HBA context object.
3440 * @mb: Pointer to a mailbox object.
3441 * @vport: Pointer to a vport object.
3442 *
3443 * Some commands require the port to be offline, some may not be called from
3444 * the application.
3445 **/
3446 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3447 MAILBOX_t *mb, struct lpfc_vport *vport)
3448 {
3449 /* return negative error values for bsg job */
3450 switch (mb->mbxCommand) {
3451 /* Offline only */
3452 case MBX_INIT_LINK:
3453 case MBX_DOWN_LINK:
3454 case MBX_CONFIG_LINK:
3455 case MBX_CONFIG_RING:
3456 case MBX_RESET_RING:
3457 case MBX_UNREG_LOGIN:
3458 case MBX_CLEAR_LA:
3459 case MBX_DUMP_CONTEXT:
3460 case MBX_RUN_DIAGS:
3461 case MBX_RESTART:
3462 case MBX_SET_MASK:
3463 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3464 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3465 "2743 Command 0x%x is illegal in on-line "
3466 "state\n",
3467 mb->mbxCommand);
3468 return -EPERM;
3469 }
3470 break;
3471 case MBX_WRITE_NV:
3472 case MBX_WRITE_VPARMS:
3473 case MBX_LOAD_SM:
3474 case MBX_READ_NV:
3475 case MBX_READ_CONFIG:
3476 case MBX_READ_RCONFIG:
3477 case MBX_READ_STATUS:
3478 case MBX_READ_XRI:
3479 case MBX_READ_REV:
3480 case MBX_READ_LNK_STAT:
3481 case MBX_DUMP_MEMORY:
3482 case MBX_DOWN_LOAD:
3483 case MBX_UPDATE_CFG:
3484 case MBX_KILL_BOARD:
3485 case MBX_READ_TOPOLOGY:
3486 case MBX_LOAD_AREA:
3487 case MBX_LOAD_EXP_ROM:
3488 case MBX_BEACON:
3489 case MBX_DEL_LD_ENTRY:
3490 case MBX_SET_DEBUG:
3491 case MBX_WRITE_WWN:
3492 case MBX_SLI4_CONFIG:
3493 case MBX_READ_EVENT_LOG:
3494 case MBX_READ_EVENT_LOG_STATUS:
3495 case MBX_WRITE_EVENT_LOG:
3496 case MBX_PORT_CAPABILITIES:
3497 case MBX_PORT_IOV_CONTROL:
3498 case MBX_RUN_BIU_DIAG64:
3499 break;
3500 case MBX_SET_VARIABLE:
3501 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3502 "1226 mbox: set_variable 0x%x, 0x%x\n",
3503 mb->un.varWords[0],
3504 mb->un.varWords[1]);
3505 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3506 && (mb->un.varWords[1] == 1)) {
3507 phba->wait_4_mlo_maint_flg = 1;
3508 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3509 spin_lock_irq(&phba->hbalock);
3510 phba->link_flag &= ~LS_LOOPBACK_MODE;
3511 spin_unlock_irq(&phba->hbalock);
3512 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3513 }
3514 break;
3515 case MBX_READ_SPARM64:
3516 case MBX_REG_LOGIN:
3517 case MBX_REG_LOGIN64:
3518 case MBX_CONFIG_PORT:
3519 case MBX_RUN_BIU_DIAG:
3520 default:
3521 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3522 "2742 Unknown Command 0x%x\n",
3523 mb->mbxCommand);
3524 return -EPERM;
3525 }
3526
3527 return 0; /* ok */
3528 }
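
/*
 * Usage note (sketch): this check is intended to run before a BSG-supplied
 * mailbox command is actually issued, e.g.
 *
 *	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
 *	if (rc)
 *		return rc;	// command not permitted via BSG
 *	... build and issue the mailbox command ...
 */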
3529
3530 /**
3531 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3532 * @phba: Pointer to HBA context object.
3533 *
3534 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3535 * command session.
3536 **/
3537 static void
3538 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3539 {
3540 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3541 return;
3542
3543 /* free all memory, including dma buffers */
3544 lpfc_bsg_dma_page_list_free(phba,
3545 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3546 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3547 /* multi-buffer write mailbox command pass-through complete */
3548 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3549 sizeof(struct lpfc_mbox_ext_buf_ctx));
3550 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3551
3552 return;
3553 }
3554
3555 /**
3556 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3557 * @phba: Pointer to HBA context object.
3558 * @pmboxq: Pointer to mailbox command.
3559 *
3560 * This routine handles BSG jobs for mailbox command completions with
3561 * multiple external buffers.
3562 **/
3563 static struct bsg_job *
3564 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3565 {
3566 struct bsg_job_data *dd_data;
3567 struct bsg_job *job;
3568 struct fc_bsg_reply *bsg_reply;
3569 uint8_t *pmb, *pmb_buf;
3570 unsigned long flags;
3571 uint32_t size;
3572 int rc = 0;
3573 struct lpfc_dmabuf *dmabuf;
3574 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3575 uint8_t *pmbx;
3576
3577 dd_data = pmboxq->ctx_buf;
3578
3579 /* Determine if job has been aborted */
3580 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3581 job = dd_data->set_job;
3582 if (job) {
3583 bsg_reply = job->reply;
3584 /* Prevent timeout handling from trying to abort job */
3585 job->dd_data = NULL;
3586 }
3587 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3588
3589 /*
3590 * The outgoing buffer is readily referred to from the dma buffer;
3591 * we just need to get the header part from the mailboxq structure.
3592 */
3593
3594 pmb = (uint8_t *)&pmboxq->u.mb;
3595 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3596 /* Copy the byte swapped response mailbox back to the user */
3597 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3598 /* if there is any non-embedded extended data copy that too */
3599 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3600 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3601 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3602 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3603 pmbx = (uint8_t *)dmabuf->virt;
3604 /* byte swap the extended data following the mailbox command */
3605 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3606 &pmbx[sizeof(MAILBOX_t)],
3607 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3608 }
3609
3610 /* Complete the job if the job is still active */
3611
3612 if (job) {
3613 size = job->reply_payload.payload_len;
3614 bsg_reply->reply_payload_rcv_len =
3615 sg_copy_from_buffer(job->reply_payload.sg_list,
3616 job->reply_payload.sg_cnt,
3617 pmb_buf, size);
3618
3619 /* result for success */
3620 bsg_reply->result = 0;
3621
3622 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3623 "2937 SLI_CONFIG ext-buffer mailbox command "
3624 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3625 phba->mbox_ext_buf_ctx.nembType,
3626 phba->mbox_ext_buf_ctx.mboxType, size);
3627 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3628 phba->mbox_ext_buf_ctx.nembType,
3629 phba->mbox_ext_buf_ctx.mboxType,
3630 dma_ebuf, sta_pos_addr,
3631 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3632 } else {
3633 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3634 "2938 SLI_CONFIG ext-buffer mailbox "
3635 "command (x%x/x%x) failure, rc:x%x\n",
3636 phba->mbox_ext_buf_ctx.nembType,
3637 phba->mbox_ext_buf_ctx.mboxType, rc);
3638 }
3639
3640
3641 /* state change */
3642 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3643 kfree(dd_data);
3644 return job;
3645 }
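/*
 * Both completion handlers below funnel through
 * lpfc_bsg_issue_mbox_ext_handle_job(): it copies the completed mailbox
 * (byte-swapping any non-embedded extended data) toward the application's
 * reply payload, marks the session LPFC_BSG_MBOX_DONE and frees the
 * tracking structure; the callers then free the mailbox object and, if the
 * job is still active, complete it with bsg_job_done().
 */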
3646
3647 /**
3648 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3649 * @phba: Pointer to HBA context object.
3650 * @pmboxq: Pointer to mailbox command.
3651 *
3652 * This is completion handler function for mailbox read commands with multiple
3653 * external buffers.
3654 **/
3655 static void
3656 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3657 {
3658 struct bsg_job *job;
3659 struct fc_bsg_reply *bsg_reply;
3660
3661 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3662
3663 /* handle the BSG job with mailbox command */
3664 if (!job)
3665 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3666
3667 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3668 "2939 SLI_CONFIG ext-buffer rd mailbox command "
3669 "complete, ctxState:x%x, mbxStatus:x%x\n",
3670 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3671
3672 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3673 lpfc_bsg_mbox_ext_session_reset(phba);
3674
3675 /* free base driver mailbox structure memory */
3676 mempool_free(pmboxq, phba->mbox_mem_pool);
3677
3678 /* if the job is still active, call job done */
3679 if (job) {
3680 bsg_reply = job->reply;
3681 bsg_job_done(job, bsg_reply->result,
3682 bsg_reply->reply_payload_rcv_len);
3683 }
3684 return;
3685 }
3686
3687 /**
3688 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3689 * @phba: Pointer to HBA context object.
3690 * @pmboxq: Pointer to mailbox command.
3691 *
3692 * This is completion handler function for mailbox write commands with multiple
3693 * external buffers.
3694 **/
3695 static void
3696 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3697 {
3698 struct bsg_job *job;
3699 struct fc_bsg_reply *bsg_reply;
3700
3701 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3702
3703 /* handle the BSG job with the mailbox command */
3704 if (!job)
3705 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3706
3707 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3708 "2940 SLI_CONFIG ext-buffer wr mailbox command "
3709 "complete, ctxState:x%x, mbxStatus:x%x\n",
3710 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3711
3712 /* free all memory, including dma buffers */
3713 mempool_free(pmboxq, phba->mbox_mem_pool);
3714 lpfc_bsg_mbox_ext_session_reset(phba);
3715
3716 /* if the job is still active, call job done */
3717 if (job) {
3718 bsg_reply = job->reply;
3719 bsg_job_done(job, bsg_reply->result,
3720 bsg_reply->reply_payload_rcv_len);
3721 }
3722
3723 return;
3724 }
3725
3726 static void
3727 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3728 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3729 struct lpfc_dmabuf *ext_dmabuf)
3730 {
3731 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3732
3733 /* pointer to the start of mailbox command */
3734 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3735
3736 if (nemb_tp == nemb_mse) {
3737 if (index == 0) {
3738 sli_cfg_mbx->un.sli_config_emb0_subsys.
3739 mse[index].pa_hi =
3740 putPaddrHigh(mbx_dmabuf->phys +
3741 sizeof(MAILBOX_t));
3742 sli_cfg_mbx->un.sli_config_emb0_subsys.
3743 mse[index].pa_lo =
3744 putPaddrLow(mbx_dmabuf->phys +
3745 sizeof(MAILBOX_t));
3746 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3747 "2943 SLI_CONFIG(mse)[%d], "
3748 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3749 index,
3750 sli_cfg_mbx->un.sli_config_emb0_subsys.
3751 mse[index].buf_len,
3752 sli_cfg_mbx->un.sli_config_emb0_subsys.
3753 mse[index].pa_hi,
3754 sli_cfg_mbx->un.sli_config_emb0_subsys.
3755 mse[index].pa_lo);
3756 } else {
3757 sli_cfg_mbx->un.sli_config_emb0_subsys.
3758 mse[index].pa_hi =
3759 putPaddrHigh(ext_dmabuf->phys);
3760 sli_cfg_mbx->un.sli_config_emb0_subsys.
3761 mse[index].pa_lo =
3762 putPaddrLow(ext_dmabuf->phys);
3763 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3764 "2944 SLI_CONFIG(mse)[%d], "
3765 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3766 index,
3767 sli_cfg_mbx->un.sli_config_emb0_subsys.
3768 mse[index].buf_len,
3769 sli_cfg_mbx->un.sli_config_emb0_subsys.
3770 mse[index].pa_hi,
3771 sli_cfg_mbx->un.sli_config_emb0_subsys.
3772 mse[index].pa_lo);
3773 }
3774 } else {
3775 if (index == 0) {
3776 sli_cfg_mbx->un.sli_config_emb1_subsys.
3777 hbd[index].pa_hi =
3778 putPaddrHigh(mbx_dmabuf->phys +
3779 sizeof(MAILBOX_t));
3780 sli_cfg_mbx->un.sli_config_emb1_subsys.
3781 hbd[index].pa_lo =
3782 putPaddrLow(mbx_dmabuf->phys +
3783 sizeof(MAILBOX_t));
3784 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3785 "3007 SLI_CONFIG(hbd)[%d], "
3786 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3787 index,
3788 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3789 &sli_cfg_mbx->un.
3790 sli_config_emb1_subsys.hbd[index]),
3791 sli_cfg_mbx->un.sli_config_emb1_subsys.
3792 hbd[index].pa_hi,
3793 sli_cfg_mbx->un.sli_config_emb1_subsys.
3794 hbd[index].pa_lo);
3795
3796 } else {
3797 sli_cfg_mbx->un.sli_config_emb1_subsys.
3798 hbd[index].pa_hi =
3799 putPaddrHigh(ext_dmabuf->phys);
3800 sli_cfg_mbx->un.sli_config_emb1_subsys.
3801 hbd[index].pa_lo =
3802 putPaddrLow(ext_dmabuf->phys);
3803 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3804 "3008 SLI_CONFIG(hbd)[%d], "
3805 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3806 index,
3807 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3808 &sli_cfg_mbx->un.
3809 sli_config_emb1_subsys.hbd[index]),
3810 sli_cfg_mbx->un.sli_config_emb1_subsys.
3811 hbd[index].pa_hi,
3812 sli_cfg_mbx->un.sli_config_emb1_subsys.
3813 hbd[index].pa_lo);
3814 }
3815 }
3816 return;
3817 }
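/*
 * External buffer descriptor layout set up above:
 *
 *   index 0  - the data area shares the mailbox DMA page, so the descriptor
 *              points at mbx_dmabuf->phys + sizeof(MAILBOX_t)
 *   index >0 - each additional buffer has its own DMA page, so the
 *              descriptor points at the start of that ext_dmabuf
 *
 * nemb_mse commands use the emb0 MSE descriptors; nemb_hbd commands use
 * the emb1 HBD descriptors.
 */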
3818
3819 /**
3820 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3821 * @phba: Pointer to HBA context object.
3822 * @job: Pointer to the job object.
3823 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3824 * @dmabuf: Pointer to a DMA buffer descriptor.
3825 *
3826 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3827 * non-embedded external buffers.
3828 **/
3829 static int
3830 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3831 enum nemb_type nemb_tp,
3832 struct lpfc_dmabuf *dmabuf)
3833 {
3834 struct fc_bsg_request *bsg_request = job->request;
3835 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3836 struct dfc_mbox_req *mbox_req;
3837 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3838 uint32_t ext_buf_cnt, ext_buf_index;
3839 struct lpfc_dmabuf *ext_dmabuf = NULL;
3840 struct bsg_job_data *dd_data = NULL;
3841 LPFC_MBOXQ_t *pmboxq = NULL;
3842 MAILBOX_t *pmb;
3843 uint8_t *pmbx;
3844 int rc, i;
3845
3846 mbox_req =
3847 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3848
3849 /* pointer to the start of mailbox command */
3850 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3851
3852 if (nemb_tp == nemb_mse) {
3853 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3854 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3855 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3856 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3857 "2945 Handled SLI_CONFIG(mse) rd, "
3858 "ext_buf_cnt(%d) out of range(%d)\n",
3859 ext_buf_cnt,
3860 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3861 rc = -ERANGE;
3862 goto job_error;
3863 }
3864 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3865 "2941 Handled SLI_CONFIG(mse) rd, "
3866 "ext_buf_cnt:%d\n", ext_buf_cnt);
3867 } else {
3868 /* sanity check on interface type for support */
3869 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3870 LPFC_SLI_INTF_IF_TYPE_2) {
3871 rc = -ENODEV;
3872 goto job_error;
3873 }
3874 /* nemb_tp == nemb_hbd */
3875 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3876 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3877 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3878 "2946 Handled SLI_CONFIG(hbd) rd, "
3879 "ext_buf_cnt(%d) out of range(%d)\n",
3880 ext_buf_cnt,
3881 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3882 rc = -ERANGE;
3883 goto job_error;
3884 }
3885 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3886 "2942 Handled SLI_CONFIG(hbd) rd, "
3887 "ext_buf_cnt:%d\n", ext_buf_cnt);
3888 }
3889
3890 /* before dma descriptor setup */
3891 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3892 sta_pre_addr, dmabuf, ext_buf_cnt);
3893
3894 /* reject a non-embedded mailbox command with no external buffer */
3895 if (ext_buf_cnt == 0) {
3896 rc = -EPERM;
3897 goto job_error;
3898 } else if (ext_buf_cnt > 1) {
3899 /* additional external read buffers */
3900 for (i = 1; i < ext_buf_cnt; i++) {
3901 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3902 if (!ext_dmabuf) {
3903 rc = -ENOMEM;
3904 goto job_error;
3905 }
3906 list_add_tail(&ext_dmabuf->list,
3907 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3908 }
3909 }
3910
3911 /* bsg tracking structure */
3912 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3913 if (!dd_data) {
3914 rc = -ENOMEM;
3915 goto job_error;
3916 }
3917
3918 /* mailbox command structure for base driver */
3919 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3920 if (!pmboxq) {
3921 rc = -ENOMEM;
3922 goto job_error;
3923 }
3924 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3925
3926 /* for the first external buffer */
3927 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3928
3929 /* for the rest of external buffer descriptors if any */
3930 if (ext_buf_cnt > 1) {
3931 ext_buf_index = 1;
3932 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3933 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3934 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3935 ext_buf_index, dmabuf,
3936 curr_dmabuf);
3937 ext_buf_index++;
3938 }
3939 }
3940
3941 /* after dma descriptor setup */
3942 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3943 sta_pos_addr, dmabuf, ext_buf_cnt);
3944
3945 /* construct base driver mbox command */
3946 pmb = &pmboxq->u.mb;
3947 pmbx = (uint8_t *)dmabuf->virt;
3948 memcpy(pmb, pmbx, sizeof(*pmb));
3949 pmb->mbxOwner = OWN_HOST;
3950 pmboxq->vport = phba->pport;
3951
3952 /* multi-buffer handling context */
3953 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3954 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3955 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3956 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3957 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3958 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3959
3960 /* callback for multi-buffer read mailbox command */
3961 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3962
3963 /* context fields to callback function */
3964 pmboxq->ctx_buf = dd_data;
3965 dd_data->type = TYPE_MBOX;
3966 dd_data->set_job = job;
3967 dd_data->context_un.mbox.pmboxq = pmboxq;
3968 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3969 job->dd_data = dd_data;
3970
3971 /* state change */
3972 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3973
3974 /*
3975 * Non-embedded mailbox subcommand data gets byte swapped here because
3976 * the lower level driver code only does the first 64 mailbox words.
3977 */
3978 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3979 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3980 (nemb_tp == nemb_mse))
3981 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3982 &pmbx[sizeof(MAILBOX_t)],
3983 sli_cfg_mbx->un.sli_config_emb0_subsys.
3984 mse[0].buf_len);
3985
3986 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3987 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3988 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3989 "2947 Issued SLI_CONFIG ext-buffer "
3990 "mailbox command, rc:x%x\n", rc);
3991 return SLI_CONFIG_HANDLED;
3992 }
3993 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3994 "2948 Failed to issue SLI_CONFIG ext-buffer "
3995 "mailbox command, rc:x%x\n", rc);
3996 rc = -EPIPE;
3997
3998 job_error:
3999 if (pmboxq)
4000 mempool_free(pmboxq, phba->mbox_mem_pool);
4001 lpfc_bsg_dma_page_list_free(phba,
4002 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4003 kfree(dd_data);
4004 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4005 return rc;
4006 }
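/*
 * Illustrative flow of a multi-buffer read session, inferred from the code
 * above and from lpfc_bsg_read_ebuf_get() (a typical sequence as assumed
 * here, not a definitive user-space API description):
 *
 *   1. The application sends the SLI_CONFIG mailbox with extSeqNum == 1;
 *      the driver allocates ext_buf_cnt - 1 extra DMA pages, patches the
 *      descriptors and issues the mailbox (returns SLI_CONFIG_HANDLED).
 *   2. When the mailbox completes, the original job finishes with the
 *      mailbox (and the first buffer's data) in its reply payload and the
 *      session sits in LPFC_BSG_MBOX_DONE.
 *   3. The application issues further BSG requests with the same extMboxTag
 *      and increasing extSeqNum; each one returns the next external buffer
 *      via lpfc_bsg_read_ebuf_get(), and the session is reset after the
 *      last buffer is fetched.
 */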
4007
4008 /**
4009 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4010 * @phba: Pointer to HBA context object.
4011 * @job: Pointer to the job object.
4012 * @nemb_tp: Enumerate of non-embedded mailbox command type.
4013 * @dmabuf: Pointer to a DMA buffer descriptor.
4014 *
4015 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
4016 * non-embedded external buffers.
4017 **/
4018 static int
4019 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4020 enum nemb_type nemb_tp,
4021 struct lpfc_dmabuf *dmabuf)
4022 {
4023 struct fc_bsg_request *bsg_request = job->request;
4024 struct fc_bsg_reply *bsg_reply = job->reply;
4025 struct dfc_mbox_req *mbox_req;
4026 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4027 uint32_t ext_buf_cnt;
4028 struct bsg_job_data *dd_data = NULL;
4029 LPFC_MBOXQ_t *pmboxq = NULL;
4030 MAILBOX_t *pmb;
4031 uint8_t *mbx;
4032 int rc = SLI_CONFIG_NOT_HANDLED, i;
4033
4034 mbox_req =
4035 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4036
4037 /* pointer to the start of mailbox command */
4038 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4039
4040 if (nemb_tp == nemb_mse) {
4041 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4042 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4043 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4044 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4045 "2953 Failed SLI_CONFIG(mse) wr, "
4046 "ext_buf_cnt(%d) out of range(%d)\n",
4047 ext_buf_cnt,
4048 LPFC_MBX_SLI_CONFIG_MAX_MSE);
4049 return -ERANGE;
4050 }
4051 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4052 "2949 Handled SLI_CONFIG(mse) wr, "
4053 "ext_buf_cnt:%d\n", ext_buf_cnt);
4054 } else {
4055 /* sanity check on interface type for support */
4056 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4057 LPFC_SLI_INTF_IF_TYPE_2)
4058 return -ENODEV;
4059 /* nemb_tp == nemb_hbd */
4060 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4061 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4062 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4063 "2954 Failed SLI_CONFIG(hbd) wr, "
4064 "ext_buf_cnt(%d) out of range(%d)\n",
4065 ext_buf_cnt,
4066 LPFC_MBX_SLI_CONFIG_MAX_HBD);
4067 return -ERANGE;
4068 }
4069 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4070 "2950 Handled SLI_CONFIG(hbd) wr, "
4071 "ext_buf_cnt:%d\n", ext_buf_cnt);
4072 }
4073
4074 /* before dma buffer descriptor setup */
4075 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4076 sta_pre_addr, dmabuf, ext_buf_cnt);
4077
4078 if (ext_buf_cnt == 0)
4079 return -EPERM;
4080
4081 /* for the first external buffer */
4082 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4083
4084 /* after dma descriptor setup */
4085 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4086 sta_pos_addr, dmabuf, ext_buf_cnt);
4087
4088 /* log the lengths of the additional external buffers */
4089 for (i = 1; i < ext_buf_cnt; i++) {
4090 if (nemb_tp == nemb_mse)
4091 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4092 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4093 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4094 mse[i].buf_len);
4095 else
4096 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4097 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4098 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4099 &sli_cfg_mbx->un.sli_config_emb1_subsys.
4100 hbd[i]));
4101 }
4102
4103 /* multi-buffer handling context */
4104 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4105 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4106 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4107 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4108 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4109 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4110
4111 if (ext_buf_cnt == 1) {
4112 /* bsg tracking structure */
4113 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4114 if (!dd_data) {
4115 rc = -ENOMEM;
4116 goto job_error;
4117 }
4118
4119 /* mailbox command structure for base driver */
4120 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4121 if (!pmboxq) {
4122 rc = -ENOMEM;
4123 goto job_error;
4124 }
4125 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4126 pmb = &pmboxq->u.mb;
4127 mbx = (uint8_t *)dmabuf->virt;
4128 memcpy(pmb, mbx, sizeof(*pmb));
4129 pmb->mbxOwner = OWN_HOST;
4130 pmboxq->vport = phba->pport;
4131
4132 /* callback for multi-buffer write mailbox command */
4133 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4134
4135 /* context fields to callback function */
4136 pmboxq->ctx_buf = dd_data;
4137 dd_data->type = TYPE_MBOX;
4138 dd_data->set_job = job;
4139 dd_data->context_un.mbox.pmboxq = pmboxq;
4140 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4141 job->dd_data = dd_data;
4142
4143 /* state change */
4144
4145 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4146 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4147 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4148 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4149 "2955 Issued SLI_CONFIG ext-buffer "
4150 "mailbox command, rc:x%x\n", rc);
4151 return SLI_CONFIG_HANDLED;
4152 }
4153 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4154 "2956 Failed to issue SLI_CONFIG ext-buffer "
4155 "mailbox command, rc:x%x\n", rc);
4156 rc = -EPIPE;
4157 goto job_error;
4158 }
4159
4160 /* wait for additional external buffers */
4161
4162 bsg_reply->result = 0;
4163 bsg_job_done(job, bsg_reply->result,
4164 bsg_reply->reply_payload_rcv_len);
4165 return SLI_CONFIG_HANDLED;
4166
4167 job_error:
4168 if (pmboxq)
4169 mempool_free(pmboxq, phba->mbox_mem_pool);
4170 kfree(dd_data);
4171
4172 return rc;
4173 }
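/*
 * Write-side counterpart: with a single external buffer the mailbox is
 * issued immediately above. With more than one buffer the job completes
 * right away and the session stays in LPFC_BSG_MBOX_HOST while the
 * remaining buffers arrive through lpfc_bsg_write_ebuf_set(); only once
 * all numBuf buffers have been received is the mailbox rebuilt from
 * mbx_dmabuf and issued with lpfc_bsg_issue_write_mbox_ext_cmpl() as the
 * completion handler.
 */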
4174
4175 /**
4176 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4177 * @phba: Pointer to HBA context object.
4178 * @job: Pointer to the job object.
4179 * @dmabuf: Pointer to a DMA buffer descriptor.
4180 *
4181 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4182 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4183 * with embedded subsystem 0x1 and opcodes with external HBDs.
4184 **/
4185 static int
4186 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4187 struct lpfc_dmabuf *dmabuf)
4188 {
4189 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4190 uint32_t subsys;
4191 uint32_t opcode;
4192 int rc = SLI_CONFIG_NOT_HANDLED;
4193
4194 /* state change on new multi-buffer pass-through mailbox command */
4195 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4196
4197 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4198
4199 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4200 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4201 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4202 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4203 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4204 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4205 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4206 switch (opcode) {
4207 case FCOE_OPCODE_READ_FCF:
4208 case FCOE_OPCODE_GET_DPORT_RESULTS:
4209 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4210 "2957 Handled SLI_CONFIG "
4211 "subsys_fcoe, opcode:x%x\n",
4212 opcode);
4213 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4214 nemb_mse, dmabuf);
4215 break;
4216 case FCOE_OPCODE_ADD_FCF:
4217 case FCOE_OPCODE_SET_DPORT_MODE:
4218 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4219 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4220 "2958 Handled SLI_CONFIG "
4221 "subsys_fcoe, opcode:x%x\n",
4222 opcode);
4223 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4224 nemb_mse, dmabuf);
4225 break;
4226 default:
4227 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4228 "2959 Reject SLI_CONFIG "
4229 "subsys_fcoe, opcode:x%x\n",
4230 opcode);
4231 rc = -EPERM;
4232 break;
4233 }
4234 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4235 switch (opcode) {
4236 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4237 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4238 case COMN_OPCODE_GET_PROFILE_CONFIG:
4239 case COMN_OPCODE_SET_FEATURES:
4240 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4241 "3106 Handled SLI_CONFIG "
4242 "subsys_comn, opcode:x%x\n",
4243 opcode);
4244 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4245 nemb_mse, dmabuf);
4246 break;
4247 default:
4248 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4249 "3107 Reject SLI_CONFIG "
4250 "subsys_comn, opcode:x%x\n",
4251 opcode);
4252 rc = -EPERM;
4253 break;
4254 }
4255 } else {
4256 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4257 "2977 Reject SLI_CONFIG "
4258 "subsys:x%d, opcode:x%x\n",
4259 subsys, opcode);
4260 rc = -EPERM;
4261 }
4262 } else {
4263 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4264 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4265 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4266 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4267 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4268 switch (opcode) {
4269 case COMN_OPCODE_READ_OBJECT:
4270 case COMN_OPCODE_READ_OBJECT_LIST:
4271 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4272 "2960 Handled SLI_CONFIG "
4273 "subsys_comn, opcode:x%x\n",
4274 opcode);
4275 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4276 nemb_hbd, dmabuf);
4277 break;
4278 case COMN_OPCODE_WRITE_OBJECT:
4279 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4280 "2961 Handled SLI_CONFIG "
4281 "subsys_comn, opcode:x%x\n",
4282 opcode);
4283 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4284 nemb_hbd, dmabuf);
4285 break;
4286 default:
4287 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4288 "2962 Not handled SLI_CONFIG "
4289 "subsys_comn, opcode:x%x\n",
4290 opcode);
4291 rc = SLI_CONFIG_NOT_HANDLED;
4292 break;
4293 }
4294 } else {
4295 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4296 "2978 Not handled SLI_CONFIG "
4297 "subsys:x%d, opcode:x%x\n",
4298 subsys, opcode);
4299 rc = SLI_CONFIG_NOT_HANDLED;
4300 }
4301 }
4302
4303 /* state reset on not handled new multi-buffer mailbox command */
4304 if (rc != SLI_CONFIG_HANDLED)
4305 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4306
4307 return rc;
4308 }
4309
4310 /**
4311 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4312 * @phba: Pointer to HBA context object.
4313 *
4314 * This routine is for requesting to abort a pass-through mailbox command with
4315 * multiple external buffers due to error condition.
4316 **/
4317 static void
4318 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4319 {
4320 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4321 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4322 else
4323 lpfc_bsg_mbox_ext_session_reset(phba);
4324 return;
4325 }
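/*
 * Note: if the command is still outstanding at the port
 * (LPFC_BSG_MBOX_PORT) the session is only flagged LPFC_BSG_MBOX_ABTS so
 * the completion path can clean up later; in every other state the session
 * can be torn down immediately.
 */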
4326
4327 /**
4328 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4329 * @phba: Pointer to HBA context object.
4330 * @job: Pointer to the job object.
4331 *
4332 * This routine copies the next mailbox read external buffer back to
4333 * user space through BSG.
4334 **/
4335 static int
4336 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4337 {
4338 struct fc_bsg_reply *bsg_reply = job->reply;
4339 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4340 struct lpfc_dmabuf *dmabuf;
4341 uint8_t *pbuf;
4342 uint32_t size;
4343 uint32_t index;
4344
4345 index = phba->mbox_ext_buf_ctx.seqNum;
4346 phba->mbox_ext_buf_ctx.seqNum++;
4347
4348 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4349 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4350
4351 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4352 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4353 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4354 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4355 "2963 SLI_CONFIG (mse) ext-buffer rd get "
4356 "buffer[%d], size:%d\n", index, size);
4357 } else {
4358 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4359 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4360 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4361 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4362 "buffer[%d], size:%d\n", index, size);
4363 }
4364 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4365 return -EPIPE;
4366 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4367 struct lpfc_dmabuf, list);
4368 list_del_init(&dmabuf->list);
4369
4370 /* after dma buffer descriptor setup */
4371 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4372 mbox_rd, dma_ebuf, sta_pos_addr,
4373 dmabuf, index);
4374
4375 pbuf = (uint8_t *)dmabuf->virt;
4376 bsg_reply->reply_payload_rcv_len =
4377 sg_copy_from_buffer(job->reply_payload.sg_list,
4378 job->reply_payload.sg_cnt,
4379 pbuf, size);
4380
4381 lpfc_bsg_dma_page_free(phba, dmabuf);
4382
4383 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4384 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4385 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4386 "command session done\n");
4387 lpfc_bsg_mbox_ext_session_reset(phba);
4388 }
4389
4390 bsg_reply->result = 0;
4391 bsg_job_done(job, bsg_reply->result,
4392 bsg_reply->reply_payload_rcv_len);
4393
4394 return SLI_CONFIG_HANDLED;
4395 }
4396
4397 /**
4398 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4399 * @phba: Pointer to HBA context object.
4400 * @job: Pointer to the job object.
4401 * @dmabuf: Pointer to a DMA buffer descriptor.
4402 *
4403 * This routine sets up the next mailbox write external buffer obtained
4404 * from user space through BSG.
4405 **/
4406 static int
4407 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4408 struct lpfc_dmabuf *dmabuf)
4409 {
4410 struct fc_bsg_reply *bsg_reply = job->reply;
4411 struct bsg_job_data *dd_data = NULL;
4412 LPFC_MBOXQ_t *pmboxq = NULL;
4413 MAILBOX_t *pmb;
4414 enum nemb_type nemb_tp;
4415 uint8_t *pbuf;
4416 uint32_t size;
4417 uint32_t index;
4418 int rc;
4419
4420 index = phba->mbox_ext_buf_ctx.seqNum;
4421 phba->mbox_ext_buf_ctx.seqNum++;
4422 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4423
4424 pbuf = (uint8_t *)dmabuf->virt;
4425 size = job->request_payload.payload_len;
4426 sg_copy_to_buffer(job->request_payload.sg_list,
4427 job->request_payload.sg_cnt,
4428 pbuf, size);
4429
4430 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4431 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4432 "2966 SLI_CONFIG (mse) ext-buffer wr set "
4433 "buffer[%d], size:%d\n",
4434 phba->mbox_ext_buf_ctx.seqNum, size);
4435
4436 } else {
4437 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4438 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4439 "buffer[%d], size:%d\n",
4440 phba->mbox_ext_buf_ctx.seqNum, size);
4441
4442 }
4443
4444 /* set up external buffer descriptor and add to external buffer list */
4445 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4446 phba->mbox_ext_buf_ctx.mbx_dmabuf,
4447 dmabuf);
4448 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4449
4450 /* after write dma buffer */
4451 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4452 mbox_wr, dma_ebuf, sta_pos_addr,
4453 dmabuf, index);
4454
4455 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4456 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4457 "2968 SLI_CONFIG ext-buffer wr all %d "
4458 "ebuffers received\n",
4459 phba->mbox_ext_buf_ctx.numBuf);
4460
4461 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4462 if (!dd_data) {
4463 rc = -ENOMEM;
4464 goto job_error;
4465 }
4466
4467 /* mailbox command structure for base driver */
4468 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4469 if (!pmboxq) {
4470 rc = -ENOMEM;
4471 goto job_error;
4472 }
4473 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4474 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4475 pmb = &pmboxq->u.mb;
4476 memcpy(pmb, pbuf, sizeof(*pmb));
4477 pmb->mbxOwner = OWN_HOST;
4478 pmboxq->vport = phba->pport;
4479
4480 /* callback for multi-buffer write mailbox command */
4481 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4482
4483 /* context fields to callback function */
4484 pmboxq->ctx_buf = dd_data;
4485 dd_data->type = TYPE_MBOX;
4486 dd_data->set_job = job;
4487 dd_data->context_un.mbox.pmboxq = pmboxq;
4488 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4489 job->dd_data = dd_data;
4490
4491 /* state change */
4492 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4493
4494 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4495 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4496 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4497 "2969 Issued SLI_CONFIG ext-buffer "
4498 "mailbox command, rc:x%x\n", rc);
4499 return SLI_CONFIG_HANDLED;
4500 }
4501 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4502 "2970 Failed to issue SLI_CONFIG ext-buffer "
4503 "mailbox command, rc:x%x\n", rc);
4504 rc = -EPIPE;
4505 goto job_error;
4506 }
4507
4508 /* wait for additional external buffers */
4509 bsg_reply->result = 0;
4510 bsg_job_done(job, bsg_reply->result,
4511 bsg_reply->reply_payload_rcv_len);
4512 return SLI_CONFIG_HANDLED;
4513
4514 job_error:
4515 if (pmboxq)
4516 mempool_free(pmboxq, phba->mbox_mem_pool);
4517 lpfc_bsg_dma_page_free(phba, dmabuf);
4518 kfree(dd_data);
4519
4520 return rc;
4521 }
4522
4523 /**
4524 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4525 * @phba: Pointer to HBA context object.
4526 * @job: Pointer to the job object.
4527 * @dmabuf: Pointer to a DMA buffer descriptor.
4528 *
4529 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4530 * command with multiple non-embedded external buffers.
4531 **/
4532 static int
4533 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4534 struct lpfc_dmabuf *dmabuf)
4535 {
4536 int rc;
4537
4538 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4539 "2971 SLI_CONFIG buffer (type:x%x)\n",
4540 phba->mbox_ext_buf_ctx.mboxType);
4541
4542 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4543 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4544 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4545 "2972 SLI_CONFIG rd buffer state "
4546 "mismatch:x%x\n",
4547 phba->mbox_ext_buf_ctx.state);
4548 lpfc_bsg_mbox_ext_abort(phba);
4549 return -EPIPE;
4550 }
4551 rc = lpfc_bsg_read_ebuf_get(phba, job);
4552 if (rc == SLI_CONFIG_HANDLED)
4553 lpfc_bsg_dma_page_free(phba, dmabuf);
4554 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4555 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4556 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4557 "2973 SLI_CONFIG wr buffer state "
4558 "mismatch:x%x\n",
4559 phba->mbox_ext_buf_ctx.state);
4560 lpfc_bsg_mbox_ext_abort(phba);
4561 return -EPIPE;
4562 }
4563 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4564 }
4565 return rc;
4566 }
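/*
 * Dispatch summary: a read session must already be in LPFC_BSG_MBOX_DONE
 * before the application may fetch buffers, while a write session must
 * still be in LPFC_BSG_MBOX_HOST while buffers are being supplied; any
 * other state is treated as an error and the session is aborted.
 */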
4567
4568 /**
4569 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4570 * @phba: Pointer to HBA context object.
4571 * @job: Pointer to the job object.
4572 * @dmabuf: Pointer to a DMA buffer descriptor.
4573 *
4574 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4575 * (0x9B) mailbox commands and external buffers.
4576 **/
4577 static int
4578 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4579 struct lpfc_dmabuf *dmabuf)
4580 {
4581 struct fc_bsg_request *bsg_request = job->request;
4582 struct dfc_mbox_req *mbox_req;
4583 int rc = SLI_CONFIG_NOT_HANDLED;
4584
4585 mbox_req =
4586 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4587
4588 /* mbox command with/without single external buffer */
4589 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4590 return rc;
4591
4592 /* mbox command and first external buffer */
4593 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4594 if (mbox_req->extSeqNum == 1) {
4595 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4596 "2974 SLI_CONFIG mailbox: tag:%d, "
4597 "seq:%d\n", mbox_req->extMboxTag,
4598 mbox_req->extSeqNum);
4599 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4600 return rc;
4601 } else
4602 goto sli_cfg_ext_error;
4603 }
4604
4605 /*
4606 * handle additional external buffers
4607 */
4608
4609 /* check broken pipe conditions */
4610 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4611 goto sli_cfg_ext_error;
4612 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4613 goto sli_cfg_ext_error;
4614 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4615 goto sli_cfg_ext_error;
4616
4617 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4618 "2975 SLI_CONFIG mailbox external buffer: "
4619 "extSta:x%x, tag:%d, seq:%d\n",
4620 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4621 mbox_req->extSeqNum);
4622 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4623 return rc;
4624
4625 sli_cfg_ext_error:
4626 /* all other cases, broken pipe */
4627 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4628 "2976 SLI_CONFIG mailbox broken pipe: "
4629 "ctxSta:x%x, ctxNumBuf:%d "
4630 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4631 phba->mbox_ext_buf_ctx.state,
4632 phba->mbox_ext_buf_ctx.numBuf,
4633 phba->mbox_ext_buf_ctx.mbxTag,
4634 phba->mbox_ext_buf_ctx.seqNum,
4635 mbox_req->extMboxTag, mbox_req->extSeqNum);
4636
4637 lpfc_bsg_mbox_ext_session_reset(phba);
4638
4639 return -EPIPE;
4640 }
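/*
 * Pipeline validation above, shown with a hypothetical three-buffer write
 * session (the tag and sequence values are illustrative only):
 *
 *   request 1: extMboxTag=7, extSeqNum=1 -> new session (state was IDLE)
 *   request 2: extMboxTag=7, extSeqNum=2 -> additional buffer accepted
 *   request 3: extMboxTag=7, extSeqNum=3 -> last buffer, mailbox issued
 *
 * A tag mismatch, a sequence number beyond numBuf, or a sequence number
 * that is not exactly seqNum + 1 is treated as a broken pipe: the session
 * is reset and -EPIPE is returned.
 */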
4641
4642 /**
4643 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4644 * @phba: Pointer to HBA context object.
4645 * @job: Pointer to the job object.
4646 * @vport: Pointer to a vport object.
4647 *
4648 * Allocate a tracking object, mailbox command memory, get a mailbox
4649 * from the mailbox pool, copy the caller mailbox command.
4650 *
4651 * If the port is offline or the SLI layer is not active, poll for the
4652 * command (the port may be being reset) and complete the job; otherwise
4653 * issue the mailbox command and let the completion handler finish it.
4654 **/
4655 static int
4656 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4657 struct lpfc_vport *vport)
4658 {
4659 struct fc_bsg_request *bsg_request = job->request;
4660 struct fc_bsg_reply *bsg_reply = job->reply;
4661 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4662 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4663 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4664 uint8_t *pmbx = NULL;
4665 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4666 struct lpfc_dmabuf *dmabuf = NULL;
4667 struct dfc_mbox_req *mbox_req;
4668 struct READ_EVENT_LOG_VAR *rdEventLog;
4669 uint32_t transmit_length, receive_length, mode;
4670 struct lpfc_mbx_sli4_config *sli4_config;
4671 struct lpfc_mbx_nembed_cmd *nembed_sge;
4672 struct ulp_bde64 *bde;
4673 uint8_t *ext = NULL;
4674 int rc = 0;
4675 uint8_t *from;
4676 uint32_t size;
4677
4678 /* in case no data is transferred */
4679 bsg_reply->reply_payload_rcv_len = 0;
4680
4681 /* sanity check to protect driver */
4682 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4683 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4684 rc = -ERANGE;
4685 goto job_done;
4686 }
4687
4688 /*
4689 * Don't allow mailbox commands to be sent when blocked or when in
4690 * the middle of discovery
4691 */
4692 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4693 rc = -EAGAIN;
4694 goto job_done;
4695 }
4696
4697 mbox_req =
4698 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4699
4700 /* check if requested extended data lengths are valid */
4701 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4702 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4703 rc = -ERANGE;
4704 goto job_done;
4705 }
4706
4707 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4708 if (!dmabuf || !dmabuf->virt) {
4709 rc = -ENOMEM;
4710 goto job_done;
4711 }
4712
4713 /* Get the mailbox command or external buffer from BSG */
4714 pmbx = (uint8_t *)dmabuf->virt;
4715 size = job->request_payload.payload_len;
4716 sg_copy_to_buffer(job->request_payload.sg_list,
4717 job->request_payload.sg_cnt, pmbx, size);
4718
4719 /* Handle possible SLI_CONFIG with non-embedded payloads */
4720 if (phba->sli_rev == LPFC_SLI_REV4) {
4721 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4722 if (rc == SLI_CONFIG_HANDLED)
4723 goto job_cont;
4724 if (rc)
4725 goto job_done;
4726 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4727 }
4728
4729 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4730 if (rc != 0)
4731 goto job_done; /* must be negative */
4732
4733 /* allocate our bsg tracking structure */
4734 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4735 if (!dd_data) {
4736 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4737 "2727 Failed allocation of dd_data\n");
4738 rc = -ENOMEM;
4739 goto job_done;
4740 }
4741
4742 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4743 if (!pmboxq) {
4744 rc = -ENOMEM;
4745 goto job_done;
4746 }
4747 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4748
4749 pmb = &pmboxq->u.mb;
4750 memcpy(pmb, pmbx, sizeof(*pmb));
4751 pmb->mbxOwner = OWN_HOST;
4752 pmboxq->vport = vport;
4753
4754 /* If HBA encountered an error attention, allow only DUMP
4755 * or RESTART mailbox commands until the HBA is restarted.
4756 */
4757 if (phba->pport->stopped &&
4758 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4759 pmb->mbxCommand != MBX_RESTART &&
4760 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4761 pmb->mbxCommand != MBX_WRITE_WWN)
4762 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4763 "2797 mbox: Issued mailbox cmd "
4764 "0x%x while in stopped state.\n",
4765 pmb->mbxCommand);
4766
4767 /* extended mailbox commands will need an extended buffer */
4768 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4769 from = pmbx;
4770 ext = from + sizeof(MAILBOX_t);
4771 pmboxq->ctx_buf = ext;
4772 pmboxq->in_ext_byte_len =
4773 mbox_req->inExtWLen * sizeof(uint32_t);
4774 pmboxq->out_ext_byte_len =
4775 mbox_req->outExtWLen * sizeof(uint32_t);
4776 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4777 }
4778
4779 /* BIU diag needs a kernel buffer to transfer the data;
4780 * allocate our own buffer and set up the mailbox command to
4781 * use it
4782 */
4783 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4784 transmit_length = pmb->un.varWords[1];
4785 receive_length = pmb->un.varWords[4];
4786 /* transmit length cannot be greater than receive length or
4787 * mailbox extension size
4788 */
4789 if ((transmit_length > receive_length) ||
4790 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4791 rc = -ERANGE;
4792 goto job_done;
4793 }
4794 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4795 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4796 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4797 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4798
4799 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4800 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4801 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4802 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4803 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4804 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4805 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4806 rdEventLog = &pmb->un.varRdEventLog;
4807 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4808 mode = bf_get(lpfc_event_log, rdEventLog);
4809
4810 /* receive length cannot be greater than mailbox
4811 * extension size
4812 */
4813 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4814 rc = -ERANGE;
4815 goto job_done;
4816 }
4817
4818 /* mode zero uses a bde like biu diags command */
4819 if (mode == 0) {
4820 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4821 + sizeof(MAILBOX_t));
4822 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4823 + sizeof(MAILBOX_t));
4824 }
4825 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4826 /* Let type 4 (well known data) through because the data is
4827 * returned in varwords[4-8].
4828 * Otherwise check the receive length and fetch the buffer address.
4829 */
4830 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4831 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4832 /* rebuild the command for sli4 using our own buffers
4833 * like we do for biu diags
4834 */
4835 receive_length = pmb->un.varWords[2];
4836 /* receive length cannot be greater than mailbox
4837 * extension size
4838 */
4839 if (receive_length == 0) {
4840 rc = -ERANGE;
4841 goto job_done;
4842 }
4843 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4844 + sizeof(MAILBOX_t));
4845 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4846 + sizeof(MAILBOX_t));
4847 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4848 pmb->un.varUpdateCfg.co) {
4849 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4850
4851 /* bde size cannot be greater than mailbox ext size */
4852 if (bde->tus.f.bdeSize >
4853 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4854 rc = -ERANGE;
4855 goto job_done;
4856 }
4857 bde->addrHigh = putPaddrHigh(dmabuf->phys
4858 + sizeof(MAILBOX_t));
4859 bde->addrLow = putPaddrLow(dmabuf->phys
4860 + sizeof(MAILBOX_t));
4861 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4862 /* Handling non-embedded SLI_CONFIG mailbox command */
4863 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4864 if (!bf_get(lpfc_mbox_hdr_emb,
4865 &sli4_config->header.cfg_mhdr)) {
4866 /* rebuild the command for sli4 using our
4867 * own buffers like we do for biu diags
4868 */
4869 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4870 &pmb->un.varWords[0];
4871 receive_length = nembed_sge->sge[0].length;
4872
4873 /* receive length cannot be greater than
4874 * mailbox extension size
4875 */
4876 if ((receive_length == 0) ||
4877 (receive_length >
4878 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4879 rc = -ERANGE;
4880 goto job_done;
4881 }
4882
4883 nembed_sge->sge[0].pa_hi =
4884 putPaddrHigh(dmabuf->phys
4885 + sizeof(MAILBOX_t));
4886 nembed_sge->sge[0].pa_lo =
4887 putPaddrLow(dmabuf->phys
4888 + sizeof(MAILBOX_t));
4889 }
4890 }
4891 }
4892
4893 dd_data->context_un.mbox.dmabuffers = dmabuf;
4894
4895 /* setup the mailbox completion callback */
4896 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4897
4898 /* setup context field to pass dd_data to the completion handler */
4899 pmboxq->ctx_ndlp = dd_data;
4900 dd_data->type = TYPE_MBOX;
4901 dd_data->set_job = job;
4902 dd_data->context_un.mbox.pmboxq = pmboxq;
4903 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4904 dd_data->context_un.mbox.ext = ext;
4905 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4906 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4907 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4908 job->dd_data = dd_data;
4909
4910 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4911 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4912 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4913 if (rc != MBX_SUCCESS) {
4914 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4915 goto job_done;
4916 }
4917
4918 /* job finished, copy the data */
4919 memcpy(pmbx, pmb, sizeof(*pmb));
4920 bsg_reply->reply_payload_rcv_len =
4921 sg_copy_from_buffer(job->reply_payload.sg_list,
4922 job->reply_payload.sg_cnt,
4923 pmbx, size);
4924 /* not waiting; mbox already done */
4925 rc = 0;
4926 goto job_done;
4927 }
4928
4929 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4930 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4931 return 1; /* job started */
4932
4933 job_done:
4934 /* common exit for error or job completed inline */
4935 if (pmboxq)
4936 mempool_free(pmboxq, phba->mbox_mem_pool);
4937 lpfc_bsg_dma_page_free(phba, dmabuf);
4938 kfree(dd_data);
4939
4940 job_cont:
4941 return rc;
4942 }
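/*
 * Rough sketch of what a user-space caller is expected to provide,
 * inferred from the checks above (structure initialization shown is
 * illustrative, not a documented API):
 *
 *   struct dfc_mbox_req req = {
 *           .mbOffset   = 0,   // mailbox offset word
 *           .inExtWLen  = 0,   // extension words sent to the port
 *           .outExtWLen = 0,   // extension words expected back
 *           .extMboxTag = 0,   // non-zero only for multi-buffer commands
 *           .extSeqNum  = 0,
 *   };
 *
 * The raw MAILBOX_t (plus any extension data) travels in the BSG request
 * payload and must fit within BSG_MBOX_SIZE; the completed mailbox is
 * copied back through the reply payload.
 */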
4943
4944 /**
4945 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4946 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4947 **/
4948 static int
4949 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4950 {
4951 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4952 struct fc_bsg_request *bsg_request = job->request;
4953 struct fc_bsg_reply *bsg_reply = job->reply;
4954 struct lpfc_hba *phba = vport->phba;
4955 struct dfc_mbox_req *mbox_req;
4956 int rc = 0;
4957
4958 /* mix-and-match backward compatibility */
4959 bsg_reply->reply_payload_rcv_len = 0;
4960 if (job->request_len <
4961 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4962 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4963 "2737 Mix-and-match backward compatibility "
4964 "between MBOX_REQ old size:%d and "
4965 "new request size:%d\n",
4966 (int)(job->request_len -
4967 sizeof(struct fc_bsg_request)),
4968 (int)sizeof(struct dfc_mbox_req));
4969 mbox_req = (struct dfc_mbox_req *)
4970 bsg_request->rqst_data.h_vendor.vendor_cmd;
4971 mbox_req->extMboxTag = 0;
4972 mbox_req->extSeqNum = 0;
4973 }
4974
4975 rc = lpfc_bsg_issue_mbox(phba, job, vport);
4976
4977 if (rc == 0) {
4978 /* job done */
4979 bsg_reply->result = 0;
4980 job->dd_data = NULL;
4981 bsg_job_done(job, bsg_reply->result,
4982 bsg_reply->reply_payload_rcv_len);
4983 } else if (rc == 1)
4984 /* job submitted, will complete later*/
4985 rc = 0; /* return zero, no error */
4986 else {
4987 /* some error occurred */
4988 bsg_reply->result = rc;
4989 job->dd_data = NULL;
4990 }
4991
4992 return rc;
4993 }
4994
4995 /**
4996 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4997 * @phba: Pointer to HBA context object.
4998 * @cmdiocbq: Pointer to command iocb.
4999 * @rspiocbq: Pointer to response iocb.
5000 *
5001 * This function is the completion handler for iocbs issued using
5002 * lpfc_menlo_cmd function. This function is called by the
5003 * ring event handler function without any lock held. This function
5004 * can be called from both worker thread context and interrupt
5005 * context. This function also can be called from another thread which
5006 * cleans up the SLI layer objects.
5007 * This function copies the response data (or sets a failing status) back
5008 * to the BSG reply, releases the iocb and the associated DMA buffers, and
5009 * completes the BSG job if it is still active.
5011 **/
5012 static void
5013 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5014 struct lpfc_iocbq *cmdiocbq,
5015 struct lpfc_iocbq *rspiocbq)
5016 {
5017 struct bsg_job_data *dd_data;
5018 struct bsg_job *job;
5019 struct fc_bsg_reply *bsg_reply;
5020 IOCB_t *rsp;
5021 struct lpfc_dmabuf *bmp, *cmp, *rmp;
5022 struct lpfc_bsg_menlo *menlo;
5023 unsigned long flags;
5024 struct menlo_response *menlo_resp;
5025 unsigned int rsp_size;
5026 int rc = 0;
5027
5028 dd_data = cmdiocbq->context_un.dd_data;
5029 cmp = cmdiocbq->cmd_dmabuf;
5030 bmp = cmdiocbq->bpl_dmabuf;
5031 menlo = &dd_data->context_un.menlo;
5032 rmp = menlo->rmp;
5033 rsp = &rspiocbq->iocb;
5034
5035 /* Determine if job has been aborted */
5036 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5037 job = dd_data->set_job;
5038 if (job) {
5039 bsg_reply = job->reply;
5040 /* Prevent timeout handling from trying to abort job */
5041 job->dd_data = NULL;
5042 }
5043 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5044
5045 /* Copy the job data or set the failing status for the job */
5046
5047 if (job) {
5048 /* always return the xri, this would be used in the case
5049 * of a menlo download to allow the data to be sent as a
5050 * continuation of the exchange.
5051 */
5052
5053 menlo_resp = (struct menlo_response *)
5054 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5055 menlo_resp->xri = rsp->ulpContext;
5056 if (rsp->ulpStatus) {
5057 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5058 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5059 case IOERR_SEQUENCE_TIMEOUT:
5060 rc = -ETIMEDOUT;
5061 break;
5062 case IOERR_INVALID_RPI:
5063 rc = -EFAULT;
5064 break;
5065 default:
5066 rc = -EACCES;
5067 break;
5068 }
5069 } else {
5070 rc = -EACCES;
5071 }
5072 } else {
5073 rsp_size = rsp->un.genreq64.bdl.bdeSize;
5074 bsg_reply->reply_payload_rcv_len =
5075 lpfc_bsg_copy_data(rmp, &job->reply_payload,
5076 rsp_size, 0);
5077 }
5078
5079 }
5080
5081 lpfc_sli_release_iocbq(phba, cmdiocbq);
5082 lpfc_free_bsg_buffers(phba, cmp);
5083 lpfc_free_bsg_buffers(phba, rmp);
5084 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5085 kfree(bmp);
5086 kfree(dd_data);
5087
5088 /* Complete the job if active */
5089
5090 if (job) {
5091 bsg_reply->result = rc;
5092 bsg_job_done(job, bsg_reply->result,
5093 bsg_reply->reply_payload_rcv_len);
5094 }
5095
5096 return;
5097 }
5098
5099 /**
5100 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5101 * @job: fc_bsg_job to handle
5102 *
5103 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
5104 * all command completions return the xri for the command.
5105 * For menlo data requests a gen request 64 CX is used to continue the exchange
5106 * supplied in the menlo request header xri field.
5107 **/
5108 static int
5109 lpfc_menlo_cmd(struct bsg_job *job)
5110 {
5111 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5112 struct fc_bsg_request *bsg_request = job->request;
5113 struct fc_bsg_reply *bsg_reply = job->reply;
5114 struct lpfc_hba *phba = vport->phba;
5115 struct lpfc_iocbq *cmdiocbq;
5116 IOCB_t *cmd;
5117 int rc = 0;
5118 struct menlo_command *menlo_cmd;
5119 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5120 int request_nseg;
5121 int reply_nseg;
5122 struct bsg_job_data *dd_data;
5123 struct ulp_bde64 *bpl = NULL;
5124
5125 /* in case no data is returned, just return the return code */
5126 bsg_reply->reply_payload_rcv_len = 0;
5127
5128 if (job->request_len <
5129 sizeof(struct fc_bsg_request) +
5130 sizeof(struct menlo_command)) {
5131 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5132 "2784 Received MENLO_CMD request below "
5133 "minimum size\n");
5134 rc = -ERANGE;
5135 goto no_dd_data;
5136 }
5137
5138 if (job->reply_len < sizeof(*bsg_reply) +
5139 sizeof(struct menlo_response)) {
5140 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5141 "2785 Received MENLO_CMD reply below "
5142 "minimum size\n");
5143 rc = -ERANGE;
5144 goto no_dd_data;
5145 }
5146
5147 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5148 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5149 "2786 Adapter does not support menlo "
5150 "commands\n");
5151 rc = -EPERM;
5152 goto no_dd_data;
5153 }
5154
5155 menlo_cmd = (struct menlo_command *)
5156 bsg_request->rqst_data.h_vendor.vendor_cmd;
5157
5158 /* allocate our bsg tracking structure */
5159 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5160 if (!dd_data) {
5161 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5162 "2787 Failed allocation of dd_data\n");
5163 rc = -ENOMEM;
5164 goto no_dd_data;
5165 }
5166
5167 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5168 if (!bmp) {
5169 rc = -ENOMEM;
5170 goto free_dd;
5171 }
5172
5173 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5174 if (!bmp->virt) {
5175 rc = -ENOMEM;
5176 goto free_bmp;
5177 }
5178
5179 INIT_LIST_HEAD(&bmp->list);
5180
5181 bpl = (struct ulp_bde64 *)bmp->virt;
5182 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5183 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5184 1, bpl, &request_nseg);
5185 if (!cmp) {
5186 rc = -ENOMEM;
5187 goto free_bmp;
5188 }
5189 lpfc_bsg_copy_data(cmp, &job->request_payload,
5190 job->request_payload.payload_len, 1);
5191
5192 bpl += request_nseg;
5193 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5194 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5195 bpl, &reply_nseg);
5196 if (!rmp) {
5197 rc = -ENOMEM;
5198 goto free_cmp;
5199 }
5200
5201 cmdiocbq = lpfc_sli_get_iocbq(phba);
5202 if (!cmdiocbq) {
5203 rc = -ENOMEM;
5204 goto free_rmp;
5205 }
5206
5207 cmd = &cmdiocbq->iocb;
5208 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5209 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5210 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5211 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5212 cmd->un.genreq64.bdl.bdeSize =
5213 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5214 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5215 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5216 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5217 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5218 cmd->ulpBdeCount = 1;
5219 cmd->ulpClass = CLASS3;
5220 cmd->ulpOwner = OWN_CHIP;
5221 cmd->ulpLe = 1; /* LE: last element of the BDE list */
5222 cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
5223 cmdiocbq->vport = phba->pport;
5224 /* We want the firmware to timeout before we do */
5225 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5226 cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
5227 cmdiocbq->context_un.dd_data = dd_data;
5228 cmdiocbq->cmd_dmabuf = cmp;
5229 cmdiocbq->bpl_dmabuf = bmp;
5230 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5231 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5232 cmd->ulpPU = MENLO_PU; /* 3 */
5233 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5234 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5235 } else {
5236 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5237 cmd->ulpPU = 1;
5238 cmd->un.ulpWord[4] = 0;
5239 cmd->ulpContext = menlo_cmd->xri;
5240 }
5241
5242 dd_data->type = TYPE_MENLO;
5243 dd_data->set_job = job;
5244 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5245 dd_data->context_un.menlo.rmp = rmp;
5246 job->dd_data = dd_data;
5247
5248 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5249 MENLO_TIMEOUT - 5);
5250 if (rc == IOCB_SUCCESS)
5251 return 0; /* done for now */
5252
5253 lpfc_sli_release_iocbq(phba, cmdiocbq);
5254
5255 free_rmp:
5256 lpfc_free_bsg_buffers(phba, rmp);
5257 free_cmp:
5258 lpfc_free_bsg_buffers(phba, cmp);
5259 free_bmp:
5260 if (bmp->virt)
5261 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5262 kfree(bmp);
5263 free_dd:
5264 kfree(dd_data);
5265 no_dd_data:
5266 /* make error code available to userspace */
5267 bsg_reply->result = rc;
5268 job->dd_data = NULL;
5269 return rc;
5270 }
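/*
 * BPL layout built above for the GEN_REQUEST64: a single mbuf (bmp) holds
 * the ulp_bde64 list, with the first request_nseg entries mapping the
 * command buffers (cmp) and the following reply_nseg entries mapping the
 * response buffers (rmp); the IOCB's BDL points at bmp and its bdeSize is
 * (request_nseg + reply_nseg) * sizeof(struct ulp_bde64).
 */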
5271
5272 static int
5273 lpfc_forced_link_speed(struct bsg_job *job)
5274 {
5275 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5276 struct lpfc_vport *vport = shost_priv(shost);
5277 struct lpfc_hba *phba = vport->phba;
5278 struct fc_bsg_reply *bsg_reply = job->reply;
5279 struct forced_link_speed_support_reply *forced_reply;
5280 int rc = 0;
5281
5282 if (job->request_len <
5283 sizeof(struct fc_bsg_request) +
5284 sizeof(struct get_forced_link_speed_support)) {
5285 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5286 "0048 Received FORCED_LINK_SPEED request "
5287 "below minimum size\n");
5288 rc = -EINVAL;
5289 goto job_error;
5290 }
5291
5292 forced_reply = (struct forced_link_speed_support_reply *)
5293 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5294
5295 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5296 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5297 "0049 Received FORCED_LINK_SPEED reply below "
5298 "minimum size\n");
5299 rc = -EINVAL;
5300 goto job_error;
5301 }
5302
5303 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5304 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5305 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5306 job_error:
5307 bsg_reply->result = rc;
5308 if (rc == 0)
5309 bsg_job_done(job, bsg_reply->result,
5310 bsg_reply->reply_payload_rcv_len);
5311 return rc;
5312 }
5313
5314 /**
5315  * lpfc_check_fwlog_support - Check FW log support on the adapter
5316  * @phba: Pointer to HBA context object.
5317  *
5318  * Check whether FW logging is supported by the adapter.
5319 **/
5320 int
5321 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5322 {
5323 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5324
5325 ras_fwlog = &phba->ras_fwlog;
5326
5327 if (!ras_fwlog->ras_hwsupport)
5328 return -EACCES;
5329 else if (!ras_fwlog->ras_enabled)
5330 return -EPERM;
5331 else
5332 return 0;
5333 }
5334
5335 /**
5336  * lpfc_bsg_get_ras_config - Get RAS configuration settings
5337  * @job: fc_bsg_job to handle
5338  *
5339  * Return the currently configured RAS (FW logging) settings.
5340 **/
5341 static int
5342 lpfc_bsg_get_ras_config(struct bsg_job *job)
5343 {
5344 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5345 struct lpfc_vport *vport = shost_priv(shost);
5346 struct fc_bsg_reply *bsg_reply = job->reply;
5347 struct lpfc_hba *phba = vport->phba;
5348 struct lpfc_bsg_get_ras_config_reply *ras_reply;
5349 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5350 int rc = 0;
5351
5352 if (job->request_len <
5353 sizeof(struct fc_bsg_request) +
5354 sizeof(struct lpfc_bsg_ras_req)) {
5355 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5356 "6192 FW_LOG request received "
5357 "below minimum size\n");
5358 rc = -EINVAL;
5359 goto ras_job_error;
5360 }
5361
5362 /* Check FW log status */
5363 rc = lpfc_check_fwlog_support(phba);
5364 if (rc)
5365 goto ras_job_error;
5366
5367 ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5368 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5369
5370 /* Current logging state */
5371 spin_lock_irq(&phba->hbalock);
5372 if (ras_fwlog->state == ACTIVE)
5373 ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5374 else
5375 ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5376 spin_unlock_irq(&phba->hbalock);
5377
5378 ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5379 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5380
5381 ras_job_error:
5382 /* make error code available to userspace */
5383 bsg_reply->result = rc;
5384
5385 /* complete the job back to userspace */
5386 if (!rc)
5387 bsg_job_done(job, bsg_reply->result,
5388 bsg_reply->reply_payload_rcv_len);
5389 return rc;
5390 }
5391
5392 /**
5393  * lpfc_bsg_set_ras_config - Set FW logging parameters
5394  * @job: fc_bsg_job to handle
5395  *
5396  * Set the log level and start or stop FW logging in host memory.
5397 **/
5398 static int
5399 lpfc_bsg_set_ras_config(struct bsg_job *job)
5400 {
5401 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5402 struct lpfc_vport *vport = shost_priv(shost);
5403 struct lpfc_hba *phba = vport->phba;
5404 struct lpfc_bsg_set_ras_config_req *ras_req;
5405 struct fc_bsg_request *bsg_request = job->request;
5406 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5407 struct fc_bsg_reply *bsg_reply = job->reply;
5408 uint8_t action = 0, log_level = 0;
5409 int rc = 0, action_status = 0;
5410
5411 if (job->request_len <
5412 sizeof(struct fc_bsg_request) +
5413 sizeof(struct lpfc_bsg_set_ras_config_req)) {
5414 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5415 "6182 Received RAS_LOG request "
5416 "below minimum size\n");
5417 rc = -EINVAL;
5418 goto ras_job_error;
5419 }
5420
5421 /* Check FW log status */
5422 rc = lpfc_check_fwlog_support(phba);
5423 if (rc)
5424 goto ras_job_error;
5425
5426 ras_req = (struct lpfc_bsg_set_ras_config_req *)
5427 bsg_request->rqst_data.h_vendor.vendor_cmd;
5428 action = ras_req->action;
5429 log_level = ras_req->log_level;
5430
5431 if (action == LPFC_RASACTION_STOP_LOGGING) {
5432 /* Check if already disabled */
5433 spin_lock_irq(&phba->hbalock);
5434 if (ras_fwlog->state != ACTIVE) {
5435 spin_unlock_irq(&phba->hbalock);
5436 rc = -ESRCH;
5437 goto ras_job_error;
5438 }
5439 spin_unlock_irq(&phba->hbalock);
5440
5441 /* Disable logging */
5442 lpfc_ras_stop_fwlog(phba);
5443 } else {
5444 		/* action == LPFC_RASACTION_START_LOGGING */
5445
5446 		/* Even if FW logging is already active, re-initialize it
5447 		 * with the new log level and return "logging already
5448 		 * running" status to the caller.
5449 		 */
5450 spin_lock_irq(&phba->hbalock);
5451 if (ras_fwlog->state != INACTIVE)
5452 action_status = -EINPROGRESS;
5453 spin_unlock_irq(&phba->hbalock);
5454
5455 /* Enable logging */
5456 rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5457 LPFC_RAS_ENABLE_LOGGING);
5458 if (rc) {
5459 rc = -EINVAL;
5460 goto ras_job_error;
5461 }
5462
5463 /* Check if FW-logging is re-initialized */
5464 if (action_status == -EINPROGRESS)
5465 rc = action_status;
5466 }
5467 ras_job_error:
5468 /* make error code available to userspace */
5469 bsg_reply->result = rc;
5470
5471 /* complete the job back to userspace */
5472 if (!rc)
5473 bsg_job_done(job, bsg_reply->result,
5474 bsg_reply->reply_payload_rcv_len);
5475
5476 return rc;
5477 }
5478
5479 /**
5480  * lpfc_bsg_get_ras_lwpd - Get log write position data
5481  * @job: fc_bsg_job to handle
5482  *
5483  * Get the offset and wrap count of the log messages written
5484  * to host memory.
5485 **/
5486 static int
5487 lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5488 {
5489 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5490 struct lpfc_vport *vport = shost_priv(shost);
5491 struct lpfc_bsg_get_ras_lwpd *ras_reply;
5492 struct lpfc_hba *phba = vport->phba;
5493 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5494 struct fc_bsg_reply *bsg_reply = job->reply;
5495 u32 *lwpd_ptr = NULL;
5496 int rc = 0;
5497
5498 rc = lpfc_check_fwlog_support(phba);
5499 if (rc)
5500 goto ras_job_error;
5501
5502 if (job->request_len <
5503 sizeof(struct fc_bsg_request) +
5504 sizeof(struct lpfc_bsg_ras_req)) {
5505 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5506 "6183 Received RAS_LOG request "
5507 "below minimum size\n");
5508 rc = -EINVAL;
5509 goto ras_job_error;
5510 }
5511
5512 ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5513 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5514
5515 if (!ras_fwlog->lwpd.virt) {
5516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5517 "6193 Restart FW Logging\n");
5518 rc = -EINVAL;
5519 goto ras_job_error;
5520 }
5521
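	/* The LWPD consists of two 32-bit words written by firmware in
	 * big-endian order: word 0 is the current write offset, word 1
	 * is the wrap count.
	 */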
5522 /* Get lwpd offset */
5523 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5524 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5525
5526 /* Get wrap count */
5527 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5528
5529 ras_job_error:
5530 /* make error code available to userspace */
5531 bsg_reply->result = rc;
5532
5533 /* complete the job back to userspace */
5534 if (!rc)
5535 bsg_job_done(job, bsg_reply->result,
5536 bsg_reply->reply_payload_rcv_len);
5537
5538 return rc;
5539 }
5540
5541 /**
5542  * lpfc_bsg_get_ras_fwlog - Read FW log
5543 * @job: fc_bsg_job to handle
5544 *
5545 * Copy the FW log into the passed buffer.
5546 **/
5547 static int
5548 lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5549 {
5550 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5551 struct lpfc_vport *vport = shost_priv(shost);
5552 struct lpfc_hba *phba = vport->phba;
5553 struct fc_bsg_request *bsg_request = job->request;
5554 struct fc_bsg_reply *bsg_reply = job->reply;
5555 struct lpfc_bsg_get_fwlog_req *ras_req;
5556 u32 rd_offset, rd_index, offset;
5557 void *src, *fwlog_buff;
5558 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5559 struct lpfc_dmabuf *dmabuf, *next;
5560 int rc = 0;
5561
5562 ras_fwlog = &phba->ras_fwlog;
5563
5564 rc = lpfc_check_fwlog_support(phba);
5565 if (rc)
5566 goto ras_job_error;
5567
5568 /* Logging to be stopped before reading */
5569 spin_lock_irq(&phba->hbalock);
5570 if (ras_fwlog->state == ACTIVE) {
5571 spin_unlock_irq(&phba->hbalock);
5572 rc = -EINPROGRESS;
5573 goto ras_job_error;
5574 }
5575 spin_unlock_irq(&phba->hbalock);
5576
5577 if (job->request_len <
5578 sizeof(struct fc_bsg_request) +
5579 sizeof(struct lpfc_bsg_get_fwlog_req)) {
5580 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5581 "6184 Received RAS_LOG request "
5582 "below minimum size\n");
5583 rc = -EINVAL;
5584 goto ras_job_error;
5585 }
5586
5587 ras_req = (struct lpfc_bsg_get_fwlog_req *)
5588 bsg_request->rqst_data.h_vendor.vendor_cmd;
5589 rd_offset = ras_req->read_offset;
5590
5591 /* Allocate memory to read fw log*/
5592 fwlog_buff = vmalloc(ras_req->read_size);
5593 if (!fwlog_buff) {
5594 rc = -ENOMEM;
5595 goto ras_job_error;
5596 }
5597
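	/* Convert the requested offset into a DMA buffer index and an
	 * offset within that buffer.
	 */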
5598 rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5599 offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5600
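	/* Walk the firmware log DMA buffer list to the entry that holds
	 * the requested offset and copy read_size bytes from there.
	 */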
5601 list_for_each_entry_safe(dmabuf, next,
5602 &ras_fwlog->fwlog_buff_list, list) {
5603
5604 if (dmabuf->buffer_tag < rd_index)
5605 continue;
5606
5607 src = dmabuf->virt + offset;
5608 memcpy(fwlog_buff, src, ras_req->read_size);
5609 break;
5610 }
5611
5612 bsg_reply->reply_payload_rcv_len =
5613 sg_copy_from_buffer(job->reply_payload.sg_list,
5614 job->reply_payload.sg_cnt,
5615 fwlog_buff, ras_req->read_size);
5616
5617 vfree(fwlog_buff);
5618
5619 ras_job_error:
5620 bsg_reply->result = rc;
5621 if (!rc)
5622 bsg_job_done(job, bsg_reply->result,
5623 bsg_reply->reply_payload_rcv_len);
5624
5625 return rc;
5626 }
5627
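/**
 * lpfc_get_trunk_info - Report trunking state to the application
 * @job: fc_bsg_job to handle
 *
 * Reply with the per-link trunk configuration and active state along
 * with the physical and logical link speeds.
 **/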
5628 static int
5629 lpfc_get_trunk_info(struct bsg_job *job)
5630 {
5631 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5632 struct lpfc_hba *phba = vport->phba;
5633 struct fc_bsg_reply *bsg_reply = job->reply;
5634 struct lpfc_trunk_info *event_reply;
5635 int rc = 0;
5636
5637 if (job->request_len <
5638 sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5639 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5640 				"2744 Received GET TRUNK_INFO request below "
5641 "minimum size\n");
5642 rc = -EINVAL;
5643 goto job_error;
5644 }
5645
5646 event_reply = (struct lpfc_trunk_info *)
5647 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5648
5649 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5650 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5651 				"2728 Received GET TRUNK_INFO reply below "
5652 "minimum size\n");
5653 rc = -EINVAL;
5654 goto job_error;
5655 }
5656 if (event_reply == NULL) {
5657 rc = -EINVAL;
5658 goto job_error;
5659 }
5660
5661 bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5662 (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5663
5664 bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5665 (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5666
5667 bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5668 (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5669
5670 bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5671 (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5672
5673 bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5674 (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5675
5676 bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5677 bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5678
5679 bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5680 bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5681
5682 bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5683 bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5684
5685 bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5686 bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5687
5688 event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5689 event_reply->logical_speed =
5690 phba->sli4_hba.link_state.logical_speed / 1000;
5691 job_error:
5692 bsg_reply->result = rc;
5693 if (!rc)
5694 bsg_job_done(job, bsg_reply->result,
5695 bsg_reply->reply_payload_rcv_len);
5696 return rc;
5697
5698 }
5699
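/**
 * lpfc_get_cgnbuf_info - Copy or reset the congestion info buffer
 * @job: fc_bsg_job to handle
 *
 * Either reset the congestion statistics or copy the congestion info
 * buffer (minus its trailing CRC) back to the application.
 **/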
5700 static int
5701 lpfc_get_cgnbuf_info(struct bsg_job *job)
5702 {
5703 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5704 struct lpfc_hba *phba = vport->phba;
5705 struct fc_bsg_request *bsg_request = job->request;
5706 struct fc_bsg_reply *bsg_reply = job->reply;
5707 struct get_cgnbuf_info_req *cgnbuf_req;
5708 struct lpfc_cgn_info *cp;
5709 uint8_t *cgn_buff;
5710 int size, cinfosz;
5711 int rc = 0;
5712
5713 if (job->request_len < sizeof(struct fc_bsg_request) +
5714 sizeof(struct get_cgnbuf_info_req)) {
5715 		rc = -EINVAL;
5716 goto job_exit;
5717 }
5718
5719 if (!phba->sli4_hba.pc_sli4_params.cmf) {
5720 rc = -ENOENT;
5721 goto job_exit;
5722 }
5723
5724 if (!phba->cgn_i || !phba->cgn_i->virt) {
5725 rc = -ENOENT;
5726 goto job_exit;
5727 }
5728
5729 cp = phba->cgn_i->virt;
5730 if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
5731 rc = -EPERM;
5732 goto job_exit;
5733 }
5734
5735 cgnbuf_req = (struct get_cgnbuf_info_req *)
5736 bsg_request->rqst_data.h_vendor.vendor_cmd;
5737
5738 /* For reset or size == 0 */
5739 bsg_reply->reply_payload_rcv_len = 0;
5740
5741 if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
5742 lpfc_init_congestion_stat(phba);
5743 goto job_exit;
5744 }
5745
5746 /* We don't want to include the CRC at the end */
5747 cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
5748
5749 size = cgnbuf_req->read_size;
5750 if (!size)
5751 goto job_exit;
5752
5753 if (size < cinfosz) {
5754 /* Just copy back what we can */
5755 cinfosz = size;
5756 rc = -E2BIG;
5757 }
5758
5759 /* Allocate memory to read congestion info */
5760 cgn_buff = vmalloc(cinfosz);
5761 if (!cgn_buff) {
5762 rc = -ENOMEM;
5763 goto job_exit;
5764 }
5765
5766 memcpy(cgn_buff, cp, cinfosz);
5767
5768 bsg_reply->reply_payload_rcv_len =
5769 sg_copy_from_buffer(job->reply_payload.sg_list,
5770 job->reply_payload.sg_cnt,
5771 cgn_buff, cinfosz);
5772
5773 vfree(cgn_buff);
5774
5775 job_exit:
5776 bsg_reply->result = rc;
5777 if (!rc)
5778 bsg_job_done(job, bsg_reply->result,
5779 bsg_reply->reply_payload_rcv_len);
5780 else
5781 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5782 "2724 GET CGNBUF error: %d\n", rc);
5783 return rc;
5784 }
5785
5786 /**
5787 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5788 * @job: fc_bsg_job to handle
5789 **/
5790 static int
5791 lpfc_bsg_hst_vendor(struct bsg_job *job)
5792 {
5793 struct fc_bsg_request *bsg_request = job->request;
5794 struct fc_bsg_reply *bsg_reply = job->reply;
5795 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5796 int rc;
5797
5798 switch (command) {
5799 case LPFC_BSG_VENDOR_SET_CT_EVENT:
5800 rc = lpfc_bsg_hba_set_event(job);
5801 break;
5802 case LPFC_BSG_VENDOR_GET_CT_EVENT:
5803 rc = lpfc_bsg_hba_get_event(job);
5804 break;
5805 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5806 rc = lpfc_bsg_send_mgmt_rsp(job);
5807 break;
5808 case LPFC_BSG_VENDOR_DIAG_MODE:
5809 rc = lpfc_bsg_diag_loopback_mode(job);
5810 break;
5811 case LPFC_BSG_VENDOR_DIAG_MODE_END:
5812 rc = lpfc_sli4_bsg_diag_mode_end(job);
5813 break;
5814 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5815 rc = lpfc_bsg_diag_loopback_run(job);
5816 break;
5817 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5818 rc = lpfc_sli4_bsg_link_diag_test(job);
5819 break;
5820 case LPFC_BSG_VENDOR_GET_MGMT_REV:
5821 rc = lpfc_bsg_get_dfc_rev(job);
5822 break;
5823 case LPFC_BSG_VENDOR_MBOX:
5824 rc = lpfc_bsg_mbox_cmd(job);
5825 break;
5826 case LPFC_BSG_VENDOR_MENLO_CMD:
5827 case LPFC_BSG_VENDOR_MENLO_DATA:
5828 rc = lpfc_menlo_cmd(job);
5829 break;
5830 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5831 rc = lpfc_forced_link_speed(job);
5832 break;
5833 case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5834 rc = lpfc_bsg_get_ras_lwpd(job);
5835 break;
5836 case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5837 rc = lpfc_bsg_get_ras_fwlog(job);
5838 break;
5839 case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5840 rc = lpfc_bsg_get_ras_config(job);
5841 break;
5842 case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5843 rc = lpfc_bsg_set_ras_config(job);
5844 break;
5845 case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5846 rc = lpfc_get_trunk_info(job);
5847 break;
5848 case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
5849 rc = lpfc_get_cgnbuf_info(job);
5850 break;
5851 default:
5852 rc = -EINVAL;
5853 bsg_reply->reply_payload_rcv_len = 0;
5854 /* make error code available to userspace */
5855 bsg_reply->result = rc;
5856 break;
5857 }
5858
5859 return rc;
5860 }
5861
5862 /**
5863 * lpfc_bsg_request - handle a bsg request from the FC transport
5864 * @job: bsg_job to handle
5865 **/
5866 int
5867 lpfc_bsg_request(struct bsg_job *job)
5868 {
5869 struct fc_bsg_request *bsg_request = job->request;
5870 struct fc_bsg_reply *bsg_reply = job->reply;
5871 uint32_t msgcode;
5872 int rc;
5873
5874 msgcode = bsg_request->msgcode;
5875 switch (msgcode) {
5876 case FC_BSG_HST_VENDOR:
5877 rc = lpfc_bsg_hst_vendor(job);
5878 break;
5879 case FC_BSG_RPT_ELS:
5880 rc = lpfc_bsg_rport_els(job);
5881 break;
5882 case FC_BSG_RPT_CT:
5883 rc = lpfc_bsg_send_mgmt_cmd(job);
5884 break;
5885 default:
5886 rc = -EINVAL;
5887 bsg_reply->reply_payload_rcv_len = 0;
5888 /* make error code available to userspace */
5889 bsg_reply->result = rc;
5890 break;
5891 }
5892
5893 return rc;
5894 }
5895
5896 /**
5897 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5898 * @job: bsg_job that has timed out
5899 *
5900 * This function just aborts the job's IOCB. The aborted IOCB will return to
5901 * the waiting function which will handle passing the error back to userspace
5902 **/
5903 int
5904 lpfc_bsg_timeout(struct bsg_job *job)
5905 {
5906 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5907 struct lpfc_hba *phba = vport->phba;
5908 struct lpfc_iocbq *cmdiocb;
5909 struct lpfc_sli_ring *pring;
5910 struct bsg_job_data *dd_data;
5911 unsigned long flags;
5912 int rc = 0;
5913 LIST_HEAD(completions);
5914 struct lpfc_iocbq *check_iocb, *next_iocb;
5915
5916 pring = lpfc_phba_elsring(phba);
5917 if (unlikely(!pring))
5918 return -EIO;
5919
5920 	/* If the job's driver data is NULL, the command has completed or is
5921 	 * in the process of completing. In this case, return status to the
5922 	 * request so the timeout is retried. This avoids double completion
5923 	 * issues and the request will be pulled off the timer queue when the
5924 	 * command's completion handler executes. Otherwise, prevent the
5925 	 * command's completion handler from executing the job-done callback
5926 	 * and continue processing to abort the outstanding command.
5927 */
5928
5929 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5930 dd_data = (struct bsg_job_data *)job->dd_data;
5931 if (dd_data) {
5932 dd_data->set_job = NULL;
5933 job->dd_data = NULL;
5934 } else {
5935 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5936 return -EAGAIN;
5937 }
5938
5939 switch (dd_data->type) {
5940 case TYPE_IOCB:
5941 /* Check to see if IOCB was issued to the port or not. If not,
5942 * remove it from the txq queue and call cancel iocbs.
5943 * Otherwise, call abort iotag
5944 */
5945 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5946 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5947
5948 spin_lock_irqsave(&phba->hbalock, flags);
5949 /* make sure the I/O abort window is still open */
5950 if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
5951 spin_unlock_irqrestore(&phba->hbalock, flags);
5952 return -EAGAIN;
5953 }
5954 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5955 list) {
5956 if (check_iocb == cmdiocb) {
5957 list_move_tail(&check_iocb->list, &completions);
5958 break;
5959 }
5960 }
5961 if (list_empty(&completions))
5962 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
5963 spin_unlock_irqrestore(&phba->hbalock, flags);
5964 if (!list_empty(&completions)) {
5965 lpfc_sli_cancel_iocbs(phba, &completions,
5966 IOSTAT_LOCAL_REJECT,
5967 IOERR_SLI_ABORTED);
5968 }
5969 break;
5970
5971 case TYPE_EVT:
5972 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5973 break;
5974
5975 case TYPE_MBOX:
5976 /* Update the ext buf ctx state if needed */
5977
5978 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5979 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5980 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5981 break;
5982 case TYPE_MENLO:
5983 /* Check to see if IOCB was issued to the port or not. If not,
5984 * remove it from the txq queue and call cancel iocbs.
5985 * Otherwise, call abort iotag.
5986 */
5987 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5988 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5989
5990 spin_lock_irqsave(&phba->hbalock, flags);
5991 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5992 list) {
5993 if (check_iocb == cmdiocb) {
5994 list_move_tail(&check_iocb->list, &completions);
5995 break;
5996 }
5997 }
5998 if (list_empty(&completions))
5999 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
6000 spin_unlock_irqrestore(&phba->hbalock, flags);
6001 if (!list_empty(&completions)) {
6002 lpfc_sli_cancel_iocbs(phba, &completions,
6003 IOSTAT_LOCAL_REJECT,
6004 IOERR_SLI_ABORTED);
6005 }
6006 break;
6007 default:
6008 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6009 break;
6010 }
6011
6012 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
6013 * otherwise an error message will be displayed on the console
6014 * so always return success (zero)
6015 */
6016 return rc;
6017 }
6018