1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_transport_fc.h>
34 #include <uapi/scsi/fc/fc_fs.h>
35 #include <uapi/scsi/fc/fc_els.h>
36
37 #include "lpfc_hw4.h"
38 #include "lpfc_hw.h"
39 #include "lpfc_sli.h"
40 #include "lpfc_sli4.h"
41 #include "lpfc_nl.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_crtn.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_debugfs.h"
49
50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
51 struct lpfc_iocbq *);
52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
53 struct lpfc_iocbq *);
54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
56 struct lpfc_nodelist *ndlp, uint8_t retry);
57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
58 struct lpfc_iocbq *iocb);
59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
60 struct lpfc_iocbq *cmdiocb,
61 struct lpfc_iocbq *rspiocb);
62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
64
65 static int lpfc_max_els_tries = 3;
66
67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
70
71 /**
72 * lpfc_els_chk_latt - Check host link attention event for a vport
73 * @vport: pointer to a host virtual N_Port data structure.
74 *
75 * This routine checks whether there is an outstanding host link
76 * attention event during the discovery process with the @vport. It is done
77 * by reading the HBA's Host Attention (HA) register. If there are any host
78 * link attention events during this @vport's discovery process, the @vport
79 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
80 * be issued if the link state is not already in host link cleared state,
81 * and a return code shall indicate whether the host link attention event
82 * had happened.
83 *
84 * Note that if either the host link is in state LPFC_LINK_DOWN or the
85 * @vport state is at or beyond LPFC_VPORT_READY, the request to check host
86 * link attention events is ignored and a return code shall indicate that
87 * no host link attention event had happened.
88 *
89 * Return codes
90 * 0 - no host link attention event happened
91 * 1 - host link attention event happened
92 **/
93 int
94 lpfc_els_chk_latt(struct lpfc_vport *vport)
95 {
96 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
97 struct lpfc_hba *phba = vport->phba;
98 uint32_t ha_copy;
99
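/* The HA register poll below applies only to SLI-3 and earlier ports;
 * SLI4 ports receive link attention through asynchronous events, so
 * they are skipped here.
 */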
100 if (vport->port_state >= LPFC_VPORT_READY ||
101 phba->link_state == LPFC_LINK_DOWN ||
102 phba->sli_rev > LPFC_SLI_REV3)
103 return 0;
104
105 /* Read the HBA Host Attention Register */
106 if (lpfc_readl(phba->HAregaddr, &ha_copy))
107 return 1;
108
109 if (!(ha_copy & HA_LATT))
110 return 0;
111
112 /* Pending Link Event during Discovery */
113 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
114 "0237 Pending Link Event during "
115 "Discovery: State x%x\n",
116 phba->pport->port_state);
117
118 /* CLEAR_LA should re-enable link attention events and
119 * we should then immediately take a LATT event. The
120 * LATT processing should call lpfc_linkdown() which
121 * will cleanup any left over in-progress discovery
122 * events.
123 */
124 spin_lock_irq(shost->host_lock);
125 vport->fc_flag |= FC_ABORT_DISCOVERY;
126 spin_unlock_irq(shost->host_lock);
127
128 if (phba->link_state != LPFC_CLEAR_LA)
129 lpfc_issue_clear_la(phba, vport);
130
131 return 1;
132 }
133
134 /**
135 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
136 * @vport: pointer to a host virtual N_Port data structure.
137 * @expect_rsp: flag indicating whether response is expected.
138 * @cmd_size: size of the ELS command.
139 * @retry: number of retries to the command when it fails.
140 * @ndlp: pointer to a node-list data structure.
141 * @did: destination identifier.
142 * @elscmd: the ELS command code.
143 *
144 * This routine allocates an lpfc IOCB data structure from the driver's
145 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed
146 * in, so the discovery state machine can issue an Extended Link Service
147 * (ELS) command. It is a generic lpfc-IOCB allocation and preparation
148 * routine used by all the discovery state machine routines; the ELS
149 * command-specific fields are set up later by the individual discovery
150 * routines after this routine returns the generic, prepared IOCB data
151 * structure. It fills in the Buffer Descriptor Entries (BDEs) and
152 * allocates buffers for the command payload and, if a response is
153 * expected, the response payload. The reference count on the ndlp is
154 * incremented by 1 and the reference to the ndlp is stored in the ndlp
155 * field of the IOCB data structure so that the command's completion
156 * callback function can access it later.
157 *
158 * Return code
159 * Pointer to the newly allocated/prepared els iocb data structure
160 * NULL - when els iocb data structure allocation/preparation failed
161 **/
162 struct lpfc_iocbq *
163 lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
164 u16 cmd_size, u8 retry,
165 struct lpfc_nodelist *ndlp, u32 did,
166 u32 elscmd)
167 {
168 struct lpfc_hba *phba = vport->phba;
169 struct lpfc_iocbq *elsiocb;
170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
171 struct ulp_bde64_le *bpl;
172 u32 timeout = 0;
173
174 if (!lpfc_is_link_up(phba))
175 return NULL;
176
177 /* Allocate buffer for command iocb */
178 elsiocb = lpfc_sli_get_iocbq(phba);
179 if (!elsiocb)
180 return NULL;
181
182 /*
183 * If this command is for fabric controller and HBA running
184 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
185 */
186 if ((did == Fabric_DID) &&
187 (phba->hba_flag & HBA_FIP_SUPPORT) &&
188 ((elscmd == ELS_CMD_FLOGI) ||
189 (elscmd == ELS_CMD_FDISC) ||
190 (elscmd == ELS_CMD_LOGO)))
191 switch (elscmd) {
192 case ELS_CMD_FLOGI:
193 elsiocb->cmd_flag |=
194 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
195 & LPFC_FIP_ELS_ID_MASK);
196 break;
197 case ELS_CMD_FDISC:
198 elsiocb->cmd_flag |=
199 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
200 & LPFC_FIP_ELS_ID_MASK);
201 break;
202 case ELS_CMD_LOGO:
203 elsiocb->cmd_flag |=
204 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
205 & LPFC_FIP_ELS_ID_MASK);
206 break;
207 }
208 else
209 elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
210
211 /* fill in BDEs for command */
212 /* Allocate buffer for command payload */
213 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
214 if (pcmd)
215 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
216 if (!pcmd || !pcmd->virt)
217 goto els_iocb_free_pcmb_exit;
218
219 INIT_LIST_HEAD(&pcmd->list);
220
221 /* Allocate buffer for response payload */
222 if (expect_rsp) {
223 prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
224 if (prsp)
225 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
226 &prsp->phys);
227 if (!prsp || !prsp->virt)
228 goto els_iocb_free_prsp_exit;
229 INIT_LIST_HEAD(&prsp->list);
230 } else {
231 prsp = NULL;
232 }
233
234 /* Allocate buffer for Buffer ptr list */
235 pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
236 if (pbuflist)
237 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
238 &pbuflist->phys);
239 if (!pbuflist || !pbuflist->virt)
240 goto els_iocb_free_pbuf_exit;
241
242 INIT_LIST_HEAD(&pbuflist->list);
243
244 if (expect_rsp) {
245 switch (elscmd) {
246 case ELS_CMD_FLOGI:
247 timeout = FF_DEF_RATOV * 2;
248 break;
249 case ELS_CMD_LOGO:
250 timeout = phba->fc_ratov;
251 break;
252 default:
253 timeout = phba->fc_ratov * 2;
254 }
255
256 /* Fill SGE for the num bde count */
257 elsiocb->num_bdes = 2;
258 }
259
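/* For SLI4 the prep routine builds the request directly from the command
 * buffer (pcmd); for SLI3 it takes the separate buffer pointer list.
 */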
260 if (phba->sli_rev == LPFC_SLI_REV4)
261 bmp = pcmd;
262 else
263 bmp = pbuflist;
264
265 lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
266 elscmd, timeout, expect_rsp);
267
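/* First BDE describes the command payload; a second BDE (below) is added
 * for the response buffer when a response is expected.
 */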
268 bpl = (struct ulp_bde64_le *)pbuflist->virt;
269 bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
270 bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
271 bpl->type_size = cpu_to_le32(cmd_size);
272 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
273
274 if (expect_rsp) {
275 bpl++;
276 bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
277 bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
278 bpl->type_size = cpu_to_le32(FCELSSIZE);
279 bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
280 }
281
282 elsiocb->cmd_dmabuf = pcmd;
283 elsiocb->bpl_dmabuf = pbuflist;
284 elsiocb->retry = retry;
285 elsiocb->vport = vport;
286 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
287
288 if (prsp)
289 list_add(&prsp->list, &pcmd->list);
290 if (expect_rsp) {
291 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
293 "0116 Xmit ELS command x%x to remote "
294 "NPORT x%x I/O tag: x%x, port state:x%x "
295 "rpi x%x fc_flag:x%x\n",
296 elscmd, did, elsiocb->iotag,
297 vport->port_state, ndlp->nlp_rpi,
298 vport->fc_flag);
299 } else {
300 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
301 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
302 "0117 Xmit ELS response x%x to remote "
303 "NPORT x%x I/O tag: x%x, size: x%x "
304 "port_state x%x rpi x%x fc_flag x%x\n",
305 elscmd, ndlp->nlp_DID, elsiocb->iotag,
306 cmd_size, vport->port_state,
307 ndlp->nlp_rpi, vport->fc_flag);
308 }
309
310 return elsiocb;
311
312 els_iocb_free_pbuf_exit:
313 if (expect_rsp)
314 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
315 kfree(pbuflist);
316
317 els_iocb_free_prsp_exit:
318 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
319 kfree(prsp);
320
321 els_iocb_free_pcmb_exit:
322 kfree(pcmd);
323 lpfc_sli_release_iocbq(phba, elsiocb);
324 return NULL;
325 }
326
327 /**
328 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
329 * @vport: pointer to a host virtual N_Port data structure.
330 *
331 * This routine issues a fabric registration login for a @vport. An
332 * active ndlp node with Fabric_DID must already exist for this @vport.
333 * The routine invokes two mailbox commands to carry out fabric registration
334 * login through the HBA firmware: the first mailbox command requests the
335 * HBA to perform link configuration for the @vport; and the second mailbox
336 * command requests the HBA to perform the actual fabric registration login
337 * with the @vport.
338 *
339 * Return code
340 * 0 - successfully issued fabric registration login for @vport
341 * -ENXIO -- failed to issue fabric registration login for @vport
342 **/
343 int
344 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
345 {
346 struct lpfc_hba *phba = vport->phba;
347 LPFC_MBOXQ_t *mbox;
348 struct lpfc_nodelist *ndlp;
349 struct serv_parm *sp;
350 int rc;
351 int err = 0;
352
353 sp = &phba->fc_fabparam;
354 ndlp = lpfc_findnode_did(vport, Fabric_DID);
355 if (!ndlp) {
356 err = 1;
357 goto fail;
358 }
359
360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
361 if (!mbox) {
362 err = 2;
363 goto fail;
364 }
365
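/* First mailbox command: request the HBA to perform link configuration
 * for this vport.
 */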
366 vport->port_state = LPFC_FABRIC_CFG_LINK;
367 lpfc_config_link(phba, mbox);
368 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
369 mbox->vport = vport;
370
371 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
372 if (rc == MBX_NOT_FINISHED) {
373 err = 3;
374 goto fail_free_mbox;
375 }
376
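/* Second mailbox command: register the fabric login (RPI) for the
 * Fabric_DID node.
 */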
377 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
378 if (!mbox) {
379 err = 4;
380 goto fail;
381 }
382 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
383 ndlp->nlp_rpi);
384 if (rc) {
385 err = 5;
386 goto fail_free_mbox;
387 }
388
389 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
390 mbox->vport = vport;
391 /* increment the reference count on ndlp to hold reference
392 * for the callback routine.
393 */
394 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
395 if (!mbox->ctx_ndlp) {
396 err = 6;
397 goto fail_free_mbox;
398 }
399
400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
401 if (rc == MBX_NOT_FINISHED) {
402 err = 7;
403 goto fail_issue_reg_login;
404 }
405
406 return 0;
407
408 fail_issue_reg_login:
409 /* decrement the reference count on ndlp just incremented
410 * for the failed mbox command.
411 */
412 lpfc_nlp_put(ndlp);
413 fail_free_mbox:
414 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
415 fail:
416 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
418 "0249 Cannot issue Register Fabric login: Err %d\n",
419 err);
420 return -ENXIO;
421 }
422
423 /**
424 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
425 * @vport: pointer to a host virtual N_Port data structure.
426 *
427 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
428 * the @vport. This mailbox command is necessary for SLI4 ports only.
429 *
430 * Return code
431 * 0 - successfully issued REG_VFI for @vport
432 * A failure code otherwise.
433 **/
434 int
435 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
436 {
437 struct lpfc_hba *phba = vport->phba;
438 LPFC_MBOXQ_t *mboxq = NULL;
439 struct lpfc_nodelist *ndlp;
440 struct lpfc_dmabuf *dmabuf = NULL;
441 int rc = 0;
442
443 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
444 if ((phba->sli_rev == LPFC_SLI_REV4) &&
445 !(phba->link_flag & LS_LOOPBACK_MODE) &&
446 !(vport->fc_flag & FC_PT2PT)) {
447 ndlp = lpfc_findnode_did(vport, Fabric_DID);
448 if (!ndlp) {
449 rc = -ENODEV;
450 goto fail;
451 }
452 }
453
454 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
455 if (!mboxq) {
456 rc = -ENOMEM;
457 goto fail;
458 }
459
460 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
461 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
462 rc = lpfc_mbox_rsrc_prep(phba, mboxq);
463 if (rc) {
464 rc = -ENOMEM;
465 goto fail_mbox;
466 }
467 dmabuf = mboxq->ctx_buf;
468 memcpy(dmabuf->virt, &phba->fc_fabparam,
469 sizeof(struct serv_parm));
470 }
471
472 vport->port_state = LPFC_FABRIC_CFG_LINK;
473 if (dmabuf) {
474 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
475 /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
476 mboxq->ctx_buf = dmabuf;
477 } else {
478 lpfc_reg_vfi(mboxq, vport, 0);
479 }
480
481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
482 mboxq->vport = vport;
483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
484 if (rc == MBX_NOT_FINISHED) {
485 rc = -ENXIO;
486 goto fail_mbox;
487 }
488 return 0;
489
490 fail_mbox:
491 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
492 fail:
493 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
495 "0289 Issue Register VFI failed: Err %d\n", rc);
496 return rc;
497 }
498
499 /**
500 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
501 * @vport: pointer to a host virtual N_Port data structure.
502 *
503 * This routine issues an UNREG_VFI mailbox to unregister the vfi, vpi, fcfi
504 * triplet for the @vport. This mailbox command is necessary for SLI4 ports only.
505 *
506 * Return code
507 * 0 - successfully issued UNREG_VFI for @vport
508 * A failure code otherwise.
509 **/
510 int
511 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
512 {
513 struct lpfc_hba *phba = vport->phba;
514 struct Scsi_Host *shost;
515 LPFC_MBOXQ_t *mboxq;
516 int rc;
517
518 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
519 if (!mboxq) {
520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
521 "2556 UNREG_VFI mbox allocation failed"
522 "HBA state x%x\n", phba->pport->port_state);
523 return -ENOMEM;
524 }
525
526 lpfc_unreg_vfi(mboxq, vport);
527 mboxq->vport = vport;
528 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
529
530 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
531 if (rc == MBX_NOT_FINISHED) {
532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
533 "2557 UNREG_VFI issue mbox failed rc x%x "
534 "HBA state x%x\n",
535 rc, phba->pport->port_state);
536 mempool_free(mboxq, phba->mbox_mem_pool);
537 return -EIO;
538 }
539
540 shost = lpfc_shost_from_vport(vport);
541 spin_lock_irq(shost->host_lock);
542 vport->fc_flag &= ~FC_VFI_REGISTERED;
543 spin_unlock_irq(shost->host_lock);
544 return 0;
545 }
546
547 /**
548 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
549 * @vport: pointer to a host virtual N_Port data structure.
550 * @sp: pointer to service parameter data structure.
551 *
552 * This routine is called from FLOGI/FDISC completion handler functions.
553 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
554 * Fabric nodename has changed in the completion service parameters;
555 * otherwise it returns 0. This function also sets a flag in the vport data
556 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
557 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
558 * FCID, Fabric portname, or Fabric nodename has changed.
559 *
560 * Return code
561 * 0 - FCID, Fabric Nodename, and Fabric portname are unchanged.
562 * 1 - FCID, Fabric Nodename, or Fabric portname has changed.
563 *
564 **/
565 static uint8_t
566 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
567 struct serv_parm *sp)
568 {
569 struct lpfc_hba *phba = vport->phba;
570 uint8_t fabric_param_changed = 0;
571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
572
573 if ((vport->fc_prevDID != vport->fc_myDID) ||
574 memcmp(&vport->fabric_portname, &sp->portName,
575 sizeof(struct lpfc_name)) ||
576 memcmp(&vport->fabric_nodename, &sp->nodeName,
577 sizeof(struct lpfc_name)) ||
578 (vport->vport_flag & FAWWPN_PARAM_CHG)) {
579 fabric_param_changed = 1;
580 vport->vport_flag &= ~FAWWPN_PARAM_CHG;
581 }
582 /*
583 * Word 1 Bit 31 in common service parameter is overloaded.
584 * Word 1 Bit 31 in FLOGI request is multiple NPort request
585 * Word 1 Bit 31 in FLOGI response is clean address bit
586 *
587 * If fabric parameter is changed and clean address bit is
588 * cleared delay nport discovery if
589 * - vport->fc_prevDID != 0 (not initial discovery) OR
590 * - lpfc_delay_discovery module parameter is set.
591 */
592 if (fabric_param_changed && !sp->cmn.clean_address_bit &&
593 (vport->fc_prevDID || phba->cfg_delay_discovery)) {
594 spin_lock_irq(shost->host_lock);
595 vport->fc_flag |= FC_DISC_DELAYED;
596 spin_unlock_irq(shost->host_lock);
597 }
598
599 return fabric_param_changed;
600 }
601
602
603 /**
604 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
605 * @vport: pointer to a host virtual N_Port data structure.
606 * @ndlp: pointer to a node-list data structure.
607 * @sp: pointer to service parameter data structure.
608 * @ulp_word4: command response value
609 *
610 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
611 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
612 * port in a fabric topology. It properly sets up the parameters to the @ndlp
613 * from the IOCB response. It also check the newly assigned N_Port ID to the
614 * @vport against the previously assigned N_Port ID. If it is different from
615 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
616 * is invoked on all the remaining nodes with the @vport to unregister the
617 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
618 * is invoked to register login to the fabric.
619 *
620 * Return code
621 * 0 - Success (currently, always return 0)
622 **/
623 static int
624 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
625 struct serv_parm *sp, uint32_t ulp_word4)
626 {
627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
628 struct lpfc_hba *phba = vport->phba;
629 struct lpfc_nodelist *np;
630 struct lpfc_nodelist *next_np;
631 uint8_t fabric_param_changed;
632
633 spin_lock_irq(shost->host_lock);
634 vport->fc_flag |= FC_FABRIC;
635 spin_unlock_irq(shost->host_lock);
636
637 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
638 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
639 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
640
641 phba->fc_edtovResol = sp->cmn.edtovResolution;
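/* R_A_TOV in the common service parameters is in milliseconds; store
 * phba->fc_ratov in seconds, rounding up.
 */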
642 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
643
644 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
645 spin_lock_irq(shost->host_lock);
646 vport->fc_flag |= FC_PUBLIC_LOOP;
647 spin_unlock_irq(shost->host_lock);
648 }
649
650 vport->fc_myDID = ulp_word4 & Mask_DID;
651 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
652 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
653 ndlp->nlp_class_sup = 0;
654 if (sp->cls1.classValid)
655 ndlp->nlp_class_sup |= FC_COS_CLASS1;
656 if (sp->cls2.classValid)
657 ndlp->nlp_class_sup |= FC_COS_CLASS2;
658 if (sp->cls3.classValid)
659 ndlp->nlp_class_sup |= FC_COS_CLASS3;
660 if (sp->cls4.classValid)
661 ndlp->nlp_class_sup |= FC_COS_CLASS4;
662 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
663 sp->cmn.bbRcvSizeLsb;
664
665 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
666 if (fabric_param_changed) {
667 /* Reset FDMI attribute masks based on config parameter */
668 if (phba->cfg_enable_SmartSAN ||
669 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
670 /* Setup appropriate attribute masks */
671 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
672 if (phba->cfg_enable_SmartSAN)
673 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
674 else
675 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
676 } else {
677 vport->fdmi_hba_mask = 0;
678 vport->fdmi_port_mask = 0;
679 }
680
681 }
682 memcpy(&vport->fabric_portname, &sp->portName,
683 sizeof(struct lpfc_name));
684 memcpy(&vport->fabric_nodename, &sp->nodeName,
685 sizeof(struct lpfc_name));
686 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
687
688 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
689 if (sp->cmn.response_multiple_NPort) {
690 lpfc_printf_vlog(vport, KERN_WARNING,
691 LOG_ELS | LOG_VPORT,
692 "1816 FLOGI NPIV supported, "
693 "response data 0x%x\n",
694 sp->cmn.response_multiple_NPort);
695 spin_lock_irq(&phba->hbalock);
696 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
697 spin_unlock_irq(&phba->hbalock);
698 } else {
699 /* Because we asked f/w for NPIV it still expects us
700 to call reg_vnpid at least for the physical host */
701 lpfc_printf_vlog(vport, KERN_WARNING,
702 LOG_ELS | LOG_VPORT,
703 "1817 Fabric does not support NPIV "
704 "- configuring single port mode.\n");
705 spin_lock_irq(&phba->hbalock);
706 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
707 spin_unlock_irq(&phba->hbalock);
708 }
709 }
710
711 /*
712 * For FC we need to do some special processing because of the SLI
713 * Port's default settings of the Common Service Parameters.
714 */
715 if ((phba->sli_rev == LPFC_SLI_REV4) &&
716 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
717 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
718 if (fabric_param_changed)
719 lpfc_unregister_fcf_prep(phba);
720
721 /* This should just update the VFI CSPs*/
722 if (vport->fc_flag & FC_VFI_REGISTERED)
723 lpfc_issue_reg_vfi(vport);
724 }
725
726 if (fabric_param_changed &&
727 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
728
729 /* If our NportID changed, we need to ensure all
730 * remaining NPORTs get unreg_login'ed.
731 */
732 list_for_each_entry_safe(np, next_np,
733 &vport->fc_nodes, nlp_listp) {
734 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
735 !(np->nlp_flag & NLP_NPR_ADISC))
736 continue;
737 spin_lock_irq(&np->lock);
738 np->nlp_flag &= ~NLP_NPR_ADISC;
739 spin_unlock_irq(&np->lock);
740 lpfc_unreg_rpi(vport, np);
741 }
742 lpfc_cleanup_pending_mbox(vport);
743
744 if (phba->sli_rev == LPFC_SLI_REV4) {
745 lpfc_sli4_unreg_all_rpis(vport);
746 lpfc_mbx_unreg_vpi(vport);
747 spin_lock_irq(shost->host_lock);
748 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
749 spin_unlock_irq(shost->host_lock);
750 }
751
752 /*
753 * For SLI3 and SLI4, the VPI needs to be reregistered in
754 * response to this fabric parameter change event.
755 */
756 spin_lock_irq(shost->host_lock);
757 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
758 spin_unlock_irq(shost->host_lock);
759 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
760 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
761 /*
762 * Driver needs to re-reg VPI in order for f/w
763 * to update the MAC address.
764 */
765 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
766 lpfc_register_new_vport(phba, vport, ndlp);
767 return 0;
768 }
769
770 if (phba->sli_rev < LPFC_SLI_REV4) {
771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
772 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
773 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
774 lpfc_register_new_vport(phba, vport, ndlp);
775 else
776 lpfc_issue_fabric_reglogin(vport);
777 } else {
778 ndlp->nlp_type |= NLP_FABRIC;
779 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
780 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
781 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
782 lpfc_start_fdiscs(phba);
783 lpfc_do_scr_ns_plogi(phba, vport);
784 } else if (vport->fc_flag & FC_VFI_REGISTERED)
785 lpfc_issue_init_vpi(vport);
786 else {
787 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
788 "3135 Need register VFI: (x%x/%x)\n",
789 vport->fc_prevDID, vport->fc_myDID);
790 lpfc_issue_reg_vfi(vport);
791 }
792 }
793 return 0;
794 }
795
796 /**
797 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
798 * @vport: pointer to a host virtual N_Port data structure.
799 * @ndlp: pointer to a node-list data structure.
800 * @sp: pointer to service parameter data structure.
801 *
802 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
803 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
804 * in a point-to-point topology. First, the @vport's N_Port Name is compared
805 * with the received N_Port Name: if the @vport's N_Port Name is greater than
806 * the received N_Port Name lexicographically, this node shall assign local
807 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
808 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
809 * this node shall just wait for the remote node to issue PLOGI and assign
810 * N_Port IDs.
811 *
812 * Return code
813 * 0 - Success
814 * -ENXIO - Fail
815 **/
816 static int
817 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
818 struct serv_parm *sp)
819 {
820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
821 struct lpfc_hba *phba = vport->phba;
822 LPFC_MBOXQ_t *mbox;
823 int rc;
824
825 spin_lock_irq(shost->host_lock);
826 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
827 vport->fc_flag |= FC_PT2PT;
828 spin_unlock_irq(shost->host_lock);
829
830 /* If we are pt2pt with another NPort, force NPIV off! */
831 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
832
833 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
834 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
835 lpfc_unregister_fcf_prep(phba);
836
837 spin_lock_irq(shost->host_lock);
838 vport->fc_flag &= ~FC_VFI_REGISTERED;
839 spin_unlock_irq(shost->host_lock);
840 phba->fc_topology_changed = 0;
841 }
842
843 rc = memcmp(&vport->fc_portname, &sp->portName,
844 sizeof(vport->fc_portname));
845
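/* Per FC point-to-point rules, the port with the higher Port_Name assigns
 * the N_Port IDs and initiates the PLOGI; the lower Port_Name waits for
 * the remote port's PLOGI instead.
 */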
846 if (rc >= 0) {
847 /* This side will initiate the PLOGI */
848 spin_lock_irq(shost->host_lock);
849 vport->fc_flag |= FC_PT2PT_PLOGI;
850 spin_unlock_irq(shost->host_lock);
851
852 /*
853 * N_Port ID cannot be 0, set our Id to LocalID
854 * the other side will be RemoteID.
855 */
856
857 /* not equal */
858 if (rc)
859 vport->fc_myDID = PT2PT_LocalID;
860
861 /* If not registered with a transport, decrement ndlp reference
862 * count indicating that ndlp can be safely released when other
863 * references are removed.
864 */
865 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
866 lpfc_nlp_put(ndlp);
867
868 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
869 if (!ndlp) {
870 /*
871 * Cannot find existing Fabric ndlp, so allocate a
872 * new one
873 */
874 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
875 if (!ndlp)
876 goto fail;
877 }
878
879 memcpy(&ndlp->nlp_portname, &sp->portName,
880 sizeof(struct lpfc_name));
881 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
882 sizeof(struct lpfc_name));
883 /* Set state will put ndlp onto node list if not already done */
884 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
885 spin_lock_irq(&ndlp->lock);
886 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
887 spin_unlock_irq(&ndlp->lock);
888
889 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
890 if (!mbox)
891 goto fail;
892
893 lpfc_config_link(phba, mbox);
894
895 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
896 mbox->vport = vport;
897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
898 if (rc == MBX_NOT_FINISHED) {
899 mempool_free(mbox, phba->mbox_mem_pool);
900 goto fail;
901 }
902 } else {
903 /* This side will wait for the PLOGI. If not registered with
904 * a transport, decrement node reference count indicating that
905 * ndlp can be released when other references are removed.
906 */
907 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
908 lpfc_nlp_put(ndlp);
909
910 /* Start discovery - this should just do CLEAR_LA */
911 lpfc_disc_start(vport);
912 }
913
914 return 0;
915 fail:
916 return -ENXIO;
917 }
918
919 /**
920 * lpfc_cmpl_els_flogi - Completion callback function for flogi
921 * @phba: pointer to lpfc hba data structure.
922 * @cmdiocb: pointer to lpfc command iocb data structure.
923 * @rspiocb: pointer to lpfc response iocb data structure.
924 *
925 * This routine is the top-level completion callback function for issuing
926 * a Fabric Login (FLOGI) command. If the response IOCB reports an error,
927 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
928 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
929 * returning 1), the command IOCB will be released and the function returns.
930 * If the retry attempt has been given up (possibly after reaching the
931 * maximum number of retries), one additional decrement of the ndlp reference
932 * shall be made before returning, after releasing the command IOCB. This
933 * will actually release the remote node (note that lpfc_els_free_iocb() will
934 * also decrement the ndlp reference count once). If no error is reported in
935 * the IOCB status, the command Port ID field is used to determine whether
936 * this is a point-to-point topology or a fabric topology: if the Port ID
937 * field is assigned, it is a fabric topology; otherwise, it is a
938 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
939 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
940 * specific topology completion conditions.
941 **/
942 static void
943 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
944 struct lpfc_iocbq *rspiocb)
945 {
946 struct lpfc_vport *vport = cmdiocb->vport;
947 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
948 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
949 IOCB_t *irsp;
950 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
951 struct serv_parm *sp;
952 uint16_t fcf_index;
953 int rc;
954 u32 ulp_status, ulp_word4, tmo;
955
956 /* Check to see if link went down during discovery */
957 if (lpfc_els_chk_latt(vport)) {
958 /* One additional decrement on node reference count to
959 * trigger the release of the node
960 */
961 if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
962 lpfc_nlp_put(ndlp);
963 goto out;
964 }
965
966 ulp_status = get_job_ulpstatus(phba, rspiocb);
967 ulp_word4 = get_job_word4(phba, rspiocb);
968
969 if (phba->sli_rev == LPFC_SLI_REV4) {
970 tmo = get_wqe_tmo(cmdiocb);
971 } else {
972 irsp = &rspiocb->iocb;
973 tmo = irsp->ulpTimeout;
974 }
975
976 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
977 "FLOGI cmpl: status:x%x/x%x state:x%x",
978 ulp_status, ulp_word4,
979 vport->port_state);
980
981 if (ulp_status) {
982 /*
983 * In case of FIP mode, perform roundrobin FCF failover
984 * due to new FCF discovery
985 */
986 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
987 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
988 if (phba->link_state < LPFC_LINK_UP)
989 goto stop_rr_fcf_flogi;
990 if ((phba->fcoe_cvl_eventtag_attn ==
991 phba->fcoe_cvl_eventtag) &&
992 (ulp_status == IOSTAT_LOCAL_REJECT) &&
993 ((ulp_word4 & IOERR_PARAM_MASK) ==
994 IOERR_SLI_ABORTED))
995 goto stop_rr_fcf_flogi;
996 else
997 phba->fcoe_cvl_eventtag_attn =
998 phba->fcoe_cvl_eventtag;
999 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1000 "2611 FLOGI failed on FCF (x%x), "
1001 "status:x%x/x%x, tmo:x%x, perform "
1002 "roundrobin FCF failover\n",
1003 phba->fcf.current_rec.fcf_indx,
1004 ulp_status, ulp_word4, tmo);
1005 lpfc_sli4_set_fcf_flogi_fail(phba,
1006 phba->fcf.current_rec.fcf_indx);
1007 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1008 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1009 if (rc)
1010 goto out;
1011 }
1012
1013 stop_rr_fcf_flogi:
1014 /* FLOGI failure */
1015 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1016 ((ulp_word4 & IOERR_PARAM_MASK) ==
1017 IOERR_LOOP_OPEN_FAILURE)))
1018 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1019 "2858 FLOGI failure Status:x%x/x%x TMO"
1020 ":x%x Data x%x x%x\n",
1021 ulp_status, ulp_word4, tmo,
1022 phba->hba_flag, phba->fcf.fcf_flag);
1023
1024 /* Check for retry */
1025 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1026 goto out;
1027
1028 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
1029 "0150 FLOGI failure Status:x%x/x%x "
1030 "xri x%x TMO:x%x refcnt %d\n",
1031 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
1032 tmo, kref_read(&ndlp->kref));
1033
1034 /* If this is not a loop open failure, bail out */
1035 if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
1036 ((ulp_word4 & IOERR_PARAM_MASK) ==
1037 IOERR_LOOP_OPEN_FAILURE))) {
1038 /* FLOGI failure */
1039 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1040 "0100 FLOGI failure Status:x%x/x%x "
1041 "TMO:x%x\n",
1042 ulp_status, ulp_word4, tmo);
1043 goto flogifail;
1044 }
1045
1046 /* FLOGI failed, so there is no fabric */
1047 spin_lock_irq(shost->host_lock);
1048 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
1049 FC_PT2PT_NO_NVME);
1050 spin_unlock_irq(shost->host_lock);
1051
1052 /* If private loop, then allow max outstanding els to be
1053 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1054 * alpa map would take too long otherwise.
1055 */
1056 if (phba->alpa_map[0] == 0)
1057 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1058 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1059 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1060 (vport->fc_prevDID != vport->fc_myDID) ||
1061 phba->fc_topology_changed)) {
1062 if (vport->fc_flag & FC_VFI_REGISTERED) {
1063 if (phba->fc_topology_changed) {
1064 lpfc_unregister_fcf_prep(phba);
1065 spin_lock_irq(shost->host_lock);
1066 vport->fc_flag &= ~FC_VFI_REGISTERED;
1067 spin_unlock_irq(shost->host_lock);
1068 phba->fc_topology_changed = 0;
1069 } else {
1070 lpfc_sli4_unreg_all_rpis(vport);
1071 }
1072 }
1073
1074 /* Do not register VFI if the driver aborted FLOGI */
1075 if (!lpfc_error_lost_link(ulp_status, ulp_word4))
1076 lpfc_issue_reg_vfi(vport);
1077
1078 lpfc_nlp_put(ndlp);
1079 goto out;
1080 }
1081 goto flogifail;
1082 }
1083 spin_lock_irq(shost->host_lock);
1084 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1085 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1086 spin_unlock_irq(shost->host_lock);
1087
1088 /*
1089 * The FLogI succeeded. Sync the data for the CPU before
1090 * accessing it.
1091 */
1092 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1093 if (!prsp)
1094 goto out;
1095 sp = prsp->virt + sizeof(uint32_t);
1096
1097 /* FLOGI completes successfully */
1098 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1099 "0101 FLOGI completes successfully, I/O tag:x%x "
1100 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
1101 cmdiocb->iotag, cmdiocb->sli4_xritag,
1102 ulp_word4, sp->cmn.e_d_tov,
1103 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1104 vport->port_state, vport->fc_flag,
1105 sp->cmn.priority_tagging, kref_read(&ndlp->kref));
1106
1107 if (sp->cmn.priority_tagging)
1108 vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
1109 LPFC_VMID_TYPE_PRIO);
1110
1111 if (vport->port_state == LPFC_FLOGI) {
1112 /*
1113 * If Common Service Parameters indicate Nport
1114 * we are point to point, if Fport we are Fabric.
1115 */
1116 if (sp->cmn.fPort)
1117 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
1118 ulp_word4);
1119 else if (!(phba->hba_flag & HBA_FCOE_MODE))
1120 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1121 else {
1122 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1123 "2831 FLOGI response with cleared Fabric "
1124 "bit fcf_index 0x%x "
1125 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1126 "Fabric Name "
1127 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1128 phba->fcf.current_rec.fcf_indx,
1129 phba->fcf.current_rec.switch_name[0],
1130 phba->fcf.current_rec.switch_name[1],
1131 phba->fcf.current_rec.switch_name[2],
1132 phba->fcf.current_rec.switch_name[3],
1133 phba->fcf.current_rec.switch_name[4],
1134 phba->fcf.current_rec.switch_name[5],
1135 phba->fcf.current_rec.switch_name[6],
1136 phba->fcf.current_rec.switch_name[7],
1137 phba->fcf.current_rec.fabric_name[0],
1138 phba->fcf.current_rec.fabric_name[1],
1139 phba->fcf.current_rec.fabric_name[2],
1140 phba->fcf.current_rec.fabric_name[3],
1141 phba->fcf.current_rec.fabric_name[4],
1142 phba->fcf.current_rec.fabric_name[5],
1143 phba->fcf.current_rec.fabric_name[6],
1144 phba->fcf.current_rec.fabric_name[7]);
1145
1146 lpfc_nlp_put(ndlp);
1147 spin_lock_irq(&phba->hbalock);
1148 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1149 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1150 spin_unlock_irq(&phba->hbalock);
1151 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1152 goto out;
1153 }
1154 if (!rc) {
1155 /* Mark the FCF discovery process done */
1156 if (phba->hba_flag & HBA_FIP_SUPPORT)
1157 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1158 LOG_ELS,
1159 "2769 FLOGI to FCF (x%x) "
1160 "completed successfully\n",
1161 phba->fcf.current_rec.fcf_indx);
1162 spin_lock_irq(&phba->hbalock);
1163 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1164 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1165 spin_unlock_irq(&phba->hbalock);
1166 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1167 goto out;
1168 }
1169 } else if (vport->port_state > LPFC_FLOGI &&
1170 vport->fc_flag & FC_PT2PT) {
1171 /*
1172 * In a p2p topology, it is possible that discovery has
1173 * already progressed, and this completion can be ignored.
1174 * Recheck the indicated topology.
1175 */
1176 if (!sp->cmn.fPort)
1177 goto out;
1178 }
1179
1180 flogifail:
1181 spin_lock_irq(&phba->hbalock);
1182 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1183 spin_unlock_irq(&phba->hbalock);
1184
1185 if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
1186 /* FLOGI failed, so just use loop map to make discovery list */
1187 lpfc_disc_list_loopmap(vport);
1188
1189 /* Start discovery */
1190 lpfc_disc_start(vport);
1191 } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
1192 (((ulp_word4 & IOERR_PARAM_MASK) !=
1193 IOERR_SLI_ABORTED) &&
1194 ((ulp_word4 & IOERR_PARAM_MASK) !=
1195 IOERR_SLI_DOWN))) &&
1196 (phba->link_state != LPFC_CLEAR_LA)) {
1197 /* If FLOGI failed enable link interrupt. */
1198 lpfc_issue_clear_la(phba, vport);
1199 }
1200 out:
1201 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
1202 lpfc_els_free_iocb(phba, cmdiocb);
1203 lpfc_nlp_put(ndlp);
1204 }
1205
1206 /**
1207 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1208 * aborted during a link down
1209 * @phba: pointer to lpfc hba data structure.
1210 * @cmdiocb: pointer to lpfc command iocb data structure.
1211 * @rspiocb: pointer to lpfc response iocb data structure.
1212 *
1213 */
1214 static void
1215 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1216 struct lpfc_iocbq *rspiocb)
1217 {
1218 uint32_t *pcmd;
1219 uint32_t cmd;
1220 u32 ulp_status, ulp_word4;
1221
1222 pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
1223 cmd = *pcmd;
1224
1225 ulp_status = get_job_ulpstatus(phba, rspiocb);
1226 ulp_word4 = get_job_word4(phba, rspiocb);
1227
1228 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1229 "6445 ELS completes after LINK_DOWN: "
1230 " Status %x/%x cmd x%x flg x%x\n",
1231 ulp_status, ulp_word4, cmd,
1232 cmdiocb->cmd_flag);
1233
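/* If this was a fabric IOCB, drop it from the outstanding fabric
 * IOCB count.
 */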
1234 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
1235 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
1236 atomic_dec(&phba->fabric_iocb_count);
1237 }
1238 lpfc_els_free_iocb(phba, cmdiocb);
1239 }
1240
1241 /**
1242 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1243 * @vport: pointer to a host virtual N_Port data structure.
1244 * @ndlp: pointer to a node-list data structure.
1245 * @retry: number of retries to the command IOCB.
1246 *
1247 * This routine issues a Fabric Login (FLOGI) Request ELS command
1248 * for a @vport. The initiator service parameters are put into the payload
1249 * of the FLOGI Request IOCB and the top-level callback function pointer
1250 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1251 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1252 * out the FLOGI ELS command, with one outstanding fabric IOCB at a time.
1253 *
1254 * Note that the ndlp reference count will be incremented by 1 for holding the
1255 * ndlp and the reference to ndlp will be stored into the ndlp field of
1256 * the IOCB for the completion callback function to the FLOGI ELS command.
1257 *
1258 * Return code
1259 * 0 - successfully issued flogi iocb for @vport
1260 * 1 - failed to issue flogi iocb for @vport
1261 **/
1262 static int
1263 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1264 uint8_t retry)
1265 {
1266 struct lpfc_hba *phba = vport->phba;
1267 struct serv_parm *sp;
1268 union lpfc_wqe128 *wqe = NULL;
1269 IOCB_t *icmd = NULL;
1270 struct lpfc_iocbq *elsiocb;
1271 struct lpfc_iocbq defer_flogi_acc;
1272 u8 *pcmd, ct;
1273 uint16_t cmdsize;
1274 uint32_t tmo, did;
1275 int rc;
1276
1277 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1278 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1279 ndlp->nlp_DID, ELS_CMD_FLOGI);
1280
1281 if (!elsiocb)
1282 return 1;
1283
1284 wqe = &elsiocb->wqe;
1285 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
1286 icmd = &elsiocb->iocb;
1287
1288 /* For FLOGI request, remainder of payload is service parameters */
1289 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1290 pcmd += sizeof(uint32_t);
1291 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1292 sp = (struct serv_parm *) pcmd;
1293
1294 /* Setup CSPs accordingly for Fabric */
1295 sp->cmn.e_d_tov = 0;
1296 sp->cmn.w2.r_a_tov = 0;
1297 sp->cmn.virtual_fabric_support = 0;
1298 sp->cls1.classValid = 0;
1299 if (sp->cmn.fcphLow < FC_PH3)
1300 sp->cmn.fcphLow = FC_PH3;
1301 if (sp->cmn.fcphHigh < FC_PH3)
1302 sp->cmn.fcphHigh = FC_PH3;
1303
1304 /* Determine if switch supports priority tagging */
1305 if (phba->cfg_vmid_priority_tagging) {
1306 sp->cmn.priority_tagging = 1;
1307 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
1308 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
1309 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
1310 sizeof(phba->wwpn));
1311 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
1312 sizeof(phba->wwnn));
1313 }
1314 }
1315
1316 if (phba->sli_rev == LPFC_SLI_REV4) {
1317 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1318 LPFC_SLI_INTF_IF_TYPE_0) {
1319 /* FLOGI needs to be 3 for WQE FCFI */
1320 ct = SLI4_CT_FCFI;
1321 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
1322
1323 /* Set the fcfi to the fcfi we registered with */
1324 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
1325 phba->fcf.fcfi);
1326 }
1327
1328 /* Can't do SLI4 class2 without support sequence coalescing */
1329 sp->cls2.classValid = 0;
1330 sp->cls2.seqDelivery = 0;
1331 } else {
1332 /* Historical, setting sequential-delivery bit for SLI3 */
1333 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1334 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1335 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1336 sp->cmn.request_multiple_Nport = 1;
1337 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1338 icmd->ulpCt_h = 1;
1339 icmd->ulpCt_l = 0;
1340 } else {
1341 sp->cmn.request_multiple_Nport = 0;
1342 }
1343
1344 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1345 icmd->un.elsreq64.myID = 0;
1346 icmd->un.elsreq64.fl = 1;
1347 }
1348 }
1349
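/* Temporarily use the FLOGI discovery timeout so lpfc_set_disctmo()
 * arms the discovery timer with the FLOGI-specific value, then restore
 * the saved R_A_TOV.
 */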
1350 tmo = phba->fc_ratov;
1351 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1352 lpfc_set_disctmo(vport);
1353 phba->fc_ratov = tmo;
1354
1355 phba->fc_stat.elsXmitFLOGI++;
1356 elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
1357
1358 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1359 "Issue FLOGI: opt:x%x",
1360 phba->sli3_options, 0, 0);
1361
1362 elsiocb->ndlp = lpfc_nlp_get(ndlp);
1363 if (!elsiocb->ndlp) {
1364 lpfc_els_free_iocb(phba, elsiocb);
1365 return 1;
1366 }
1367
1368 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1369 if (rc == IOCB_ERROR) {
1370 lpfc_els_free_iocb(phba, elsiocb);
1371 lpfc_nlp_put(ndlp);
1372 return 1;
1373 }
1374
1375 phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
1376
1377 /* Clear external loopback plug detected flag */
1378 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
1379
1380 /* Check for a deferred FLOGI ACC condition */
1381 if (phba->defer_flogi_acc_flag) {
1382 /* lookup ndlp for received FLOGI */
1383 ndlp = lpfc_findnode_did(vport, 0);
1384 if (!ndlp)
1385 return 0;
1386
1387 did = vport->fc_myDID;
1388 vport->fc_myDID = Fabric_DID;
1389
1390 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1391
1392 if (phba->sli_rev == LPFC_SLI_REV4) {
1393 bf_set(wqe_ctxt_tag,
1394 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1395 phba->defer_flogi_acc_rx_id);
1396 bf_set(wqe_rcvoxid,
1397 &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
1398 phba->defer_flogi_acc_ox_id);
1399 } else {
1400 icmd = &defer_flogi_acc.iocb;
1401 icmd->ulpContext = phba->defer_flogi_acc_rx_id;
1402 icmd->unsli3.rcvsli3.ox_id =
1403 phba->defer_flogi_acc_ox_id;
1404 }
1405
1406 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1407 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1408 " ox_id: x%x, hba_flag x%x\n",
1409 phba->defer_flogi_acc_rx_id,
1410 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1411
1412 /* Send deferred FLOGI ACC */
1413 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1414 ndlp, NULL);
1415
1416 phba->defer_flogi_acc_flag = false;
1417 vport->fc_myDID = did;
1418
1419 /* Decrement ndlp reference count to indicate the node can be
1420 * released when other references are removed.
1421 */
1422 lpfc_nlp_put(ndlp);
1423 }
1424
1425 return 0;
1426 }
1427
1428 /**
1429 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1430 * @phba: pointer to lpfc hba data structure.
1431 *
1432 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1433 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1434 * list and issues an abort IOCB command on each outstanding IOCB that
1435 * contains an active Fabric_DID ndlp. Note that this function only issues
1436 * the abort IOCB command on all the outstanding IOCBs, thus when this
1437 * function returns, it does not guarantee all the IOCBs are actually aborted.
1438 *
1439 * Return code
1440 * 0 - Successfully issued abort iocb on all outstanding flogis (-EIO if no ELS ring)
1441 **/
1442 int
1443 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1444 {
1445 struct lpfc_sli_ring *pring;
1446 struct lpfc_iocbq *iocb, *next_iocb;
1447 struct lpfc_nodelist *ndlp;
1448 u32 ulp_command;
1449
1450 /* Abort outstanding I/O on NPort <nlp_DID> */
1451 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1452 "0201 Abort outstanding I/O on NPort x%x\n",
1453 Fabric_DID);
1454
1455 pring = lpfc_phba_elsring(phba);
1456 if (unlikely(!pring))
1457 return -EIO;
1458
1459 /*
1460 * Check the txcmplq for an iocb that matches the nport the driver is
1461 * searching for.
1462 */
1463 spin_lock_irq(&phba->hbalock);
1464 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1465 ulp_command = get_job_cmnd(phba, iocb);
1466 if (ulp_command == CMD_ELS_REQUEST64_CR) {
1467 ndlp = iocb->ndlp;
1468 if (ndlp && ndlp->nlp_DID == Fabric_DID) {
1469 if ((phba->pport->fc_flag & FC_PT2PT) &&
1470 !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
1471 iocb->fabric_cmd_cmpl =
1472 lpfc_ignore_els_cmpl;
1473 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
1474 NULL);
1475 }
1476 }
1477 }
1478 /* Make sure HBA is alive */
1479 lpfc_issue_hb_tmo(phba);
1480
1481 spin_unlock_irq(&phba->hbalock);
1482
1483 return 0;
1484 }
1485
1486 /**
1487 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1488 * @vport: pointer to a host virtual N_Port data structure.
1489 *
1490 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1491 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) in
1492 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1493 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1494 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1495 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1496 * @vport.
1497 *
1498 * Return code
1499 * 0 - failed to issue initial flogi for @vport
1500 * 1 - successfully issued initial flogi for @vport
1501 **/
1502 int
1503 lpfc_initial_flogi(struct lpfc_vport *vport)
1504 {
1505 struct lpfc_nodelist *ndlp;
1506
1507 vport->port_state = LPFC_FLOGI;
1508 lpfc_set_disctmo(vport);
1509
1510 /* First look for the Fabric ndlp */
1511 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1512 if (!ndlp) {
1513 /* Cannot find existing Fabric ndlp, so allocate a new one */
1514 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1515 if (!ndlp)
1516 return 0;
1517 /* Set the node type */
1518 ndlp->nlp_type |= NLP_FABRIC;
1519
1520 /* Put ndlp onto node list */
1521 lpfc_enqueue_node(vport, ndlp);
1522 }
1523
1524 /* Reset the Fabric flag, topology change may have happened */
1525 vport->fc_flag &= ~FC_FABRIC;
1526 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1527 /* A node reference should be retained while registered with a
1528 * transport or dev-loss-evt work is pending.
1529 * Otherwise, decrement node reference to trigger release.
1530 */
1531 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1532 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1533 lpfc_nlp_put(ndlp);
1534 return 0;
1535 }
1536 return 1;
1537 }
1538
1539 /**
1540 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1541 * @vport: pointer to a host virtual N_Port data structure.
1542 *
1543 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1544 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) in
1545 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1546 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1547 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1548 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1549 * @vport.
1550 *
1551 * Return code
1552 * 0 - failed to issue initial fdisc for @vport
1553 * 1 - successfully issued initial fdisc for @vport
1554 **/
1555 int
1556 lpfc_initial_fdisc(struct lpfc_vport *vport)
1557 {
1558 struct lpfc_nodelist *ndlp;
1559
1560 /* First look for the Fabric ndlp */
1561 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1562 if (!ndlp) {
1563 /* Cannot find existing Fabric ndlp, so allocate a new one */
1564 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1565 if (!ndlp)
1566 return 0;
1567
1568 /* NPIV is only supported in Fabrics. */
1569 ndlp->nlp_type |= NLP_FABRIC;
1570
1571 /* Put ndlp onto node list */
1572 lpfc_enqueue_node(vport, ndlp);
1573 }
1574
1575 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1576 /* A node reference should be retained while registered with a
1577 * transport or dev-loss-evt work is pending.
1578 * Otherwise, decrement node reference to trigger release.
1579 */
1580 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
1581 !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
1582 lpfc_nlp_put(ndlp);
1583 return 0;
1584 }
1585 return 1;
1586 }
1587
1588 /**
1589 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1590 * @vport: pointer to a host virtual N_Port data structure.
1591 *
1592 * This routine checks whether there are more remaining Port Logins
1593 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1594 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1595 * to issue ELS PLOGIs up to the configured discover threads with the
1596 * @vport (@vport->cfg_discovery_threads). The function also decrements
1597 * the @vport's num_disc_nodes by 1 if it is not already 0.
1598 **/
1599 void
1600 lpfc_more_plogi(struct lpfc_vport *vport)
1601 {
1602 if (vport->num_disc_nodes)
1603 vport->num_disc_nodes--;
1604
1605 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1606 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1607 "0232 Continue discovery with %d PLOGIs to go "
1608 "Data: x%x x%x x%x\n",
1609 vport->num_disc_nodes, vport->fc_plogi_cnt,
1610 vport->fc_flag, vport->port_state);
1611 /* Check to see if there are more PLOGIs to be sent */
1612 if (vport->fc_flag & FC_NLP_MORE)
1613 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1614 lpfc_els_disc_plogi(vport);
1615
1616 return;
1617 }
1618
1619 /**
1620 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1621 * @phba: pointer to lpfc hba data structure.
1622 * @prsp: pointer to response IOCB payload.
1623 * @ndlp: pointer to a node-list data structure.
1624 *
1625 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1626 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1627 * The following cases are considered N_Port confirmed:
1628  * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and matches
1629  * the WWPN of the N_Port logged into; 3) The @ndlp is not on the vport list but
1630  * does not have a WWPN assigned either. If the WWPN is confirmed, the
1631  * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1632  * 1) if there is a node on the vport list other than the @ndlp with the same
1633  * WWPN as the N_Port logged into via PLOGI, lpfc_unreg_rpi() will be invoked
1634  * on that node to release the RPI associated with it; 2) if there is
1635  * no node on the vport list with the same WWPN as the N_Port logged into via
1636  * PLOGI, a new node shall be allocated (or activated). In either case, the
1637  * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1638  * be released, and the new_ndlp shall be put on the vport node list with
1639  * its pointer returned as the confirmed node.
1640 *
1641  * Note that before the @ndlp is "released", the keepDID from the not-matching
1642  * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1643  * of the @ndlp. This is because the release of @ndlp actually puts it
1644  * into an inactive state on the vport node list, and the vport node list
1645  * management algorithm does not allow two nodes with the same DID.
1646 *
1647 * Return code
1648 * pointer to the PLOGI N_Port @ndlp
1649 **/
1650 static struct lpfc_nodelist *
1651 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1652 struct lpfc_nodelist *ndlp)
1653 {
1654 struct lpfc_vport *vport = ndlp->vport;
1655 struct lpfc_nodelist *new_ndlp;
1656 struct serv_parm *sp;
1657 uint8_t name[sizeof(struct lpfc_name)];
1658 uint32_t keepDID = 0, keep_nlp_flag = 0;
1659 uint32_t keep_new_nlp_flag = 0;
1660 uint16_t keep_nlp_state;
1661 u32 keep_nlp_fc4_type = 0;
1662 struct lpfc_nvme_rport *keep_nrport = NULL;
1663 unsigned long *active_rrqs_xri_bitmap = NULL;
1664
1665 /* Fabric nodes can have the same WWPN so we don't bother searching
1666 * by WWPN. Just return the ndlp that was given to us.
1667 */
1668 if (ndlp->nlp_type & NLP_FABRIC)
1669 return ndlp;
1670
1671 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1672 memset(name, 0, sizeof(struct lpfc_name));
1673
1674 /* Now we find out if the NPort we are logging into, matches the WWPN
1675 * we have for that ndlp. If not, we have some work to do.
1676 */
1677 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1678
1679 /* return immediately if the WWPN matches ndlp */
1680 if (!new_ndlp || (new_ndlp == ndlp))
1681 return ndlp;
1682
1683 /*
1684 * Unregister from backend if not done yet. Could have been skipped
1685 * due to ADISC
1686 */
1687 lpfc_nlp_unreg_node(vport, new_ndlp);
1688
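	/* On SLI4, each node tracks its outstanding RRQ XRIs in a bitmap.
	 * Take a scratch copy of new_ndlp's bitmap here so it can be
	 * restored to whichever node ends up keeping the old DID below.
	 */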
1689 if (phba->sli_rev == LPFC_SLI_REV4) {
1690 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1691 GFP_KERNEL);
1692 if (active_rrqs_xri_bitmap)
1693 memset(active_rrqs_xri_bitmap, 0,
1694 phba->cfg_rrq_xri_bitmap_sz);
1695 }
1696
1697 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1698 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1699 "new_ndlp x%x x%x x%x\n",
1700 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
1701 (new_ndlp ? new_ndlp->nlp_DID : 0),
1702 (new_ndlp ? new_ndlp->nlp_flag : 0),
1703 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1704
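	/* Save new_ndlp's current DID; it is assigned back to whichever node
	 * does not keep the PLOGI DID so two nodes never share the same DID.
	 */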
1705 keepDID = new_ndlp->nlp_DID;
1706
1707 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
1708 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
1709 phba->cfg_rrq_xri_bitmap_sz);
1710
1711 /* At this point in this routine, we know new_ndlp will be
1712 	 * returned. However, any previous GID_FTs that were done
1713 * would have updated nlp_fc4_type in ndlp, so we must ensure
1714 * new_ndlp has the right value.
1715 */
1716 if (vport->fc_flag & FC_FABRIC) {
1717 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1718 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1719 }
1720
1721 lpfc_unreg_rpi(vport, new_ndlp);
1722 new_ndlp->nlp_DID = ndlp->nlp_DID;
1723 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1724 if (phba->sli_rev == LPFC_SLI_REV4)
1725 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1726 ndlp->active_rrqs_xri_bitmap,
1727 phba->cfg_rrq_xri_bitmap_sz);
1728
1729 /* Lock both ndlps */
1730 spin_lock_irq(&ndlp->lock);
1731 spin_lock_irq(&new_ndlp->lock);
1732 keep_new_nlp_flag = new_ndlp->nlp_flag;
1733 keep_nlp_flag = ndlp->nlp_flag;
1734 new_ndlp->nlp_flag = ndlp->nlp_flag;
1735
1736 /* if new_ndlp had NLP_UNREG_INP set, keep it */
1737 if (keep_new_nlp_flag & NLP_UNREG_INP)
1738 new_ndlp->nlp_flag |= NLP_UNREG_INP;
1739 else
1740 new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1741
1742 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1743 if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1744 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1745 else
1746 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1747
1748 /*
1749 * Retain the DROPPED flag. This will take care of the init
1750 * refcount when affecting the state change
1751 */
1752 if (keep_new_nlp_flag & NLP_DROPPED)
1753 new_ndlp->nlp_flag |= NLP_DROPPED;
1754 else
1755 new_ndlp->nlp_flag &= ~NLP_DROPPED;
1756
1757 ndlp->nlp_flag = keep_new_nlp_flag;
1758
1759 /* if ndlp had NLP_UNREG_INP set, keep it */
1760 if (keep_nlp_flag & NLP_UNREG_INP)
1761 ndlp->nlp_flag |= NLP_UNREG_INP;
1762 else
1763 ndlp->nlp_flag &= ~NLP_UNREG_INP;
1764
1765 /* if ndlp had NLP_RPI_REGISTERED set, keep it */
1766 if (keep_nlp_flag & NLP_RPI_REGISTERED)
1767 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1768 else
1769 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1770
1771 /*
1772 * Retain the DROPPED flag. This will take care of the init
1773 * refcount when affecting the state change
1774 */
1775 if (keep_nlp_flag & NLP_DROPPED)
1776 ndlp->nlp_flag |= NLP_DROPPED;
1777 else
1778 ndlp->nlp_flag &= ~NLP_DROPPED;
1779
1780 spin_unlock_irq(&new_ndlp->lock);
1781 spin_unlock_irq(&ndlp->lock);
1782
1783 /* Set nlp_states accordingly */
1784 keep_nlp_state = new_ndlp->nlp_state;
1785 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1786
1787 /* interchange the nvme remoteport structs */
1788 keep_nrport = new_ndlp->nrport;
1789 new_ndlp->nrport = ndlp->nrport;
1790
1791 /* Move this back to NPR state */
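	/* 'name' was zeroed above, so this test is true only when ndlp has
	 * no WWPN assigned, i.e. the PLOGI was issued to a DID-only node.
	 */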
1792 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1793 /* The new_ndlp is replacing ndlp totally, so we need
1794 * to put ndlp on UNUSED list and try to free it.
1795 */
1796 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1797 "3179 PLOGI confirm NEW: %x %x\n",
1798 new_ndlp->nlp_DID, keepDID);
1799
1800 /* Two ndlps cannot have the same did on the nodelist.
1801 * Note: for this case, ndlp has a NULL WWPN so setting
1802 * the nlp_fc4_type isn't required.
1803 */
1804 ndlp->nlp_DID = keepDID;
1805 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1806 if (phba->sli_rev == LPFC_SLI_REV4 &&
1807 active_rrqs_xri_bitmap)
1808 memcpy(ndlp->active_rrqs_xri_bitmap,
1809 active_rrqs_xri_bitmap,
1810 phba->cfg_rrq_xri_bitmap_sz);
1811
1812 } else {
1813 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1814 "3180 PLOGI confirm SWAP: %x %x\n",
1815 new_ndlp->nlp_DID, keepDID);
1816
1817 lpfc_unreg_rpi(vport, ndlp);
1818
1819 /* Two ndlps cannot have the same did and the fc4
1820 * type must be transferred because the ndlp is in
1821 * flight.
1822 */
1823 ndlp->nlp_DID = keepDID;
1824 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1825
1826 if (phba->sli_rev == LPFC_SLI_REV4 &&
1827 active_rrqs_xri_bitmap)
1828 memcpy(ndlp->active_rrqs_xri_bitmap,
1829 active_rrqs_xri_bitmap,
1830 phba->cfg_rrq_xri_bitmap_sz);
1831
1832 /* Since we are switching over to the new_ndlp,
1833 * reset the old ndlp state
1834 */
1835 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1836 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1837 keep_nlp_state = NLP_STE_NPR_NODE;
1838 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1839 ndlp->nrport = keep_nrport;
1840 }
1841
1842 /*
1843 * If ndlp is not associated with any rport we can drop it here else
1844 * let dev_loss_tmo_callbk trigger DEVICE_RM event
1845 */
1846 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
1847 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
1848
1849 if (phba->sli_rev == LPFC_SLI_REV4 &&
1850 active_rrqs_xri_bitmap)
1851 mempool_free(active_rrqs_xri_bitmap,
1852 phba->active_rrq_pool);
1853
1854 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1855 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1856 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1857 new_ndlp->nlp_fc4_type);
1858
1859 return new_ndlp;
1860 }
1861
1862 /**
1863 * lpfc_end_rscn - Check and handle more rscn for a vport
1864 * @vport: pointer to a host virtual N_Port data structure.
1865 *
1866 * This routine checks whether more Registration State Change
1867 * Notifications (RSCNs) came in while the discovery state machine was in
1868 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1869 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1870 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1871 * handling the RSCNs.
1872 **/
1873 void
1874 lpfc_end_rscn(struct lpfc_vport *vport)
1875 {
1876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1877
1878 if (vport->fc_flag & FC_RSCN_MODE) {
1879 /*
1880 * Check to see if more RSCNs came in while we were
1881 * processing this one.
1882 */
1883 if (vport->fc_rscn_id_cnt ||
1884 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1885 lpfc_els_handle_rscn(vport);
1886 else {
1887 spin_lock_irq(shost->host_lock);
1888 vport->fc_flag &= ~FC_RSCN_MODE;
1889 spin_unlock_irq(shost->host_lock);
1890 }
1891 }
1892 }
1893
1894 /**
1895 * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1896 * @phba: pointer to lpfc hba data structure.
1897 * @cmdiocb: pointer to lpfc command iocb data structure.
1898 * @rspiocb: pointer to lpfc response iocb data structure.
1899 *
1900 * This routine will call the clear rrq function to free the rrq and
1901 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1902 * exist then the clear_rrq is still called because the rrq needs to
1903 * be freed.
1904 **/
1905
1906 static void
1907 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1908 struct lpfc_iocbq *rspiocb)
1909 {
1910 struct lpfc_vport *vport = cmdiocb->vport;
1911 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
1912 struct lpfc_node_rrq *rrq;
1913 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1914 u32 ulp_word4 = get_job_word4(phba, rspiocb);
1915
1916 /* we pass cmdiocb to state machine which needs rspiocb as well */
1917 rrq = cmdiocb->context_un.rrq;
1918 cmdiocb->rsp_iocb = rspiocb;
1919
1920 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1921 "RRQ cmpl: status:x%x/x%x did:x%x",
1922 ulp_status, ulp_word4,
1923 get_job_els_rsp64_did(phba, cmdiocb));
1924
1925
1926 /* rrq completes to NPort <nlp_DID> */
1927 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1928 "2880 RRQ completes to DID x%x "
1929 "Data: x%x x%x x%x x%x x%x\n",
1930 ndlp->nlp_DID, ulp_status, ulp_word4,
1931 get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
1932
1933 if (ulp_status) {
1934 /* Check for retry */
1935 		/* RRQ failed. Don't print the vport-to-vport rjts */
1936 if (ulp_status != IOSTAT_LS_RJT ||
1937 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
1938 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
1939 (phba)->pport->cfg_log_verbose & LOG_ELS)
1940 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1941 "2881 RRQ failure DID:%06X Status:"
1942 "x%x/x%x\n",
1943 ndlp->nlp_DID, ulp_status,
1944 ulp_word4);
1945 }
1946
1947 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1948 lpfc_els_free_iocb(phba, cmdiocb);
1949 lpfc_nlp_put(ndlp);
1950 return;
1951 }
1952 /**
1953 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1954 * @phba: pointer to lpfc hba data structure.
1955 * @cmdiocb: pointer to lpfc command iocb data structure.
1956 * @rspiocb: pointer to lpfc response iocb data structure.
1957 *
1958 * This routine is the completion callback function for issuing the Port
1959 * Login (PLOGI) command. For PLOGI completion, there must be an active
1960 * ndlp on the vport node list that matches the remote node ID from the
1961 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1962 * ignored and command IOCB released. The PLOGI response IOCB status is
1963 * checked for error conditions. If there is error status reported, PLOGI
1964 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1965 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1966  * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
1967  * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1968 * there are additional N_Port nodes with the vport that need to perform
1969  * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1970 * PLOGIs.
1971 **/
1972 static void
1973 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1974 struct lpfc_iocbq *rspiocb)
1975 {
1976 struct lpfc_vport *vport = cmdiocb->vport;
1977 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1978 IOCB_t *irsp;
1979 struct lpfc_nodelist *ndlp, *free_ndlp;
1980 struct lpfc_dmabuf *prsp;
1981 int disc;
1982 struct serv_parm *sp = NULL;
1983 u32 ulp_status, ulp_word4, did, iotag;
1984 bool release_node = false;
1985
1986 /* we pass cmdiocb to state machine which needs rspiocb as well */
1987 cmdiocb->rsp_iocb = rspiocb;
1988
1989 ulp_status = get_job_ulpstatus(phba, rspiocb);
1990 ulp_word4 = get_job_word4(phba, rspiocb);
1991 did = get_job_els_rsp64_did(phba, cmdiocb);
1992
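	/* The request tag lives in the WQE on SLI4; on SLI3 it is carried
	 * in the response IOCB.
	 */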
1993 if (phba->sli_rev == LPFC_SLI_REV4) {
1994 iotag = get_wqe_reqtag(cmdiocb);
1995 } else {
1996 irsp = &rspiocb->iocb;
1997 iotag = irsp->ulpIoTag;
1998 }
1999
2000 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2001 "PLOGI cmpl: status:x%x/x%x did:x%x",
2002 ulp_status, ulp_word4, did);
2003
2004 ndlp = lpfc_findnode_did(vport, did);
2005 if (!ndlp) {
2006 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2007 "0136 PLOGI completes to NPort x%x "
2008 "with no ndlp. Data: x%x x%x x%x\n",
2009 did, ulp_status, ulp_word4, iotag);
2010 goto out_freeiocb;
2011 }
2012
2013 /* Since ndlp can be freed in the disc state machine, note if this node
2014 * is being used during discovery.
2015 */
2016 spin_lock_irq(&ndlp->lock);
2017 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2018 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2019 spin_unlock_irq(&ndlp->lock);
2020
2021 /* PLOGI completes to NPort <nlp_DID> */
2022 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2023 "0102 PLOGI completes to NPort x%06x "
2024 "Data: x%x x%x x%x x%x x%x\n",
2025 ndlp->nlp_DID, ndlp->nlp_fc4_type,
2026 ulp_status, ulp_word4,
2027 disc, vport->num_disc_nodes);
2028
2029 /* Check to see if link went down during discovery */
2030 if (lpfc_els_chk_latt(vport)) {
2031 spin_lock_irq(&ndlp->lock);
2032 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2033 spin_unlock_irq(&ndlp->lock);
2034 goto out;
2035 }
2036
2037 if (ulp_status) {
2038 /* Check for retry */
2039 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2040 /* ELS command is being retried */
2041 if (disc) {
2042 spin_lock_irq(&ndlp->lock);
2043 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2044 spin_unlock_irq(&ndlp->lock);
2045 }
2046 goto out;
2047 }
2048 		/* PLOGI failed. Don't print the vport-to-vport rjts */
2049 if (ulp_status != IOSTAT_LS_RJT ||
2050 (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
2051 ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
2052 (phba)->pport->cfg_log_verbose & LOG_ELS)
2053 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2054 "2753 PLOGI failure DID:%06X "
2055 "Status:x%x/x%x\n",
2056 ndlp->nlp_DID, ulp_status,
2057 ulp_word4);
2058
2059 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2060 if (!lpfc_error_lost_link(ulp_status, ulp_word4))
2061 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2062 NLP_EVT_CMPL_PLOGI);
2063
2064 /* If a PLOGI collision occurred, the node needs to continue
2065 * with the reglogin process.
2066 */
2067 spin_lock_irq(&ndlp->lock);
2068 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
2069 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
2070 spin_unlock_irq(&ndlp->lock);
2071 goto out;
2072 }
2073
2074 /* No PLOGI collision and the node is not registered with the
2075 * scsi or nvme transport. It is no longer an active node. Just
2076 * start the device remove process.
2077 */
2078 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2079 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2080 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2081 release_node = true;
2082 }
2083 spin_unlock_irq(&ndlp->lock);
2084
2085 if (release_node)
2086 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2087 NLP_EVT_DEVICE_RM);
2088 } else {
2089 /* Good status, call state machine */
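		/* The PLOGI response payload is the second buffer on the
		 * command's dmabuf list; the service parameters start after
		 * the 4-byte ELS command code.
		 */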
2090 prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
2091 struct lpfc_dmabuf, list);
2092 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2093
2094 sp = (struct serv_parm *)((u8 *)prsp->virt +
2095 sizeof(u32));
2096
2097 ndlp->vmid_support = 0;
2098 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
2099 (phba->cfg_vmid_priority_tagging &&
2100 sp->cmn.priority_tagging)) {
2101 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
2102 "4018 app_hdr_support %d tagging %d DID x%x\n",
2103 sp->cmn.app_hdr_support,
2104 sp->cmn.priority_tagging,
2105 ndlp->nlp_DID);
2106 /* if the dest port supports VMID, mark it in ndlp */
2107 ndlp->vmid_support = 1;
2108 }
2109
2110 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2111 NLP_EVT_CMPL_PLOGI);
2112 }
2113
2114 if (disc && vport->num_disc_nodes) {
2115 /* Check to see if there are more PLOGIs to be sent */
2116 lpfc_more_plogi(vport);
2117
2118 if (vport->num_disc_nodes == 0) {
2119 spin_lock_irq(shost->host_lock);
2120 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2121 spin_unlock_irq(shost->host_lock);
2122
2123 lpfc_can_disctmo(vport);
2124 lpfc_end_rscn(vport);
2125 }
2126 }
2127
2128 out:
2129 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2130 "PLOGI Cmpl PUT: did:x%x refcnt %d",
2131 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2132
2133 out_freeiocb:
2134 /* Release the reference on the original I/O request. */
2135 free_ndlp = cmdiocb->ndlp;
2136
2137 lpfc_els_free_iocb(phba, cmdiocb);
2138 lpfc_nlp_put(free_ndlp);
2139 return;
2140 }
2141
2142 /**
2143 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
2144 * @vport: pointer to a host virtual N_Port data structure.
2145 * @did: destination port identifier.
2146 * @retry: number of retries to the command IOCB.
2147 *
2148 * This routine issues a Port Login (PLOGI) command to a remote N_Port
2149 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2150 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2151 * This routine constructs the proper fields of the PLOGI IOCB and invokes
2152 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2153 *
2154 * Note that the ndlp reference count will be incremented by 1 for holding
2155 * the ndlp and the reference to ndlp will be stored into the ndlp field
2156 * of the IOCB for the completion callback function to the PLOGI ELS command.
2157 *
2158 * Return code
2159 * 0 - Successfully issued a plogi for @vport
2160 * 1 - failed to issue a plogi for @vport
2161 **/
2162 int
2163 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2164 {
2165 struct lpfc_hba *phba = vport->phba;
2166 struct serv_parm *sp;
2167 struct lpfc_nodelist *ndlp;
2168 struct lpfc_iocbq *elsiocb;
2169 uint8_t *pcmd;
2170 uint16_t cmdsize;
2171 int ret;
2172
2173 ndlp = lpfc_findnode_did(vport, did);
2174 if (!ndlp)
2175 return 1;
2176
2177 /* Defer the processing of the issue PLOGI until after the
2178 * outstanding UNREG_RPI mbox command completes, unless we
2179 * are going offline. This logic does not apply for Fabric DIDs
2180 */
2181 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2182 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2183 !(vport->fc_flag & FC_OFFLINE_MODE)) {
2184 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2185 "4110 Issue PLOGI x%x deferred "
2186 "on NPort x%x rpi x%x Data: x%px\n",
2187 ndlp->nlp_defer_did, ndlp->nlp_DID,
2188 ndlp->nlp_rpi, ndlp);
2189
2190 /* We can only defer 1st PLOGI */
2191 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2192 ndlp->nlp_defer_did = did;
2193 return 0;
2194 }
2195
2196 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2197 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2198 ELS_CMD_PLOGI);
2199 if (!elsiocb)
2200 return 1;
2201
2202 spin_lock_irq(&ndlp->lock);
2203 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
2204 spin_unlock_irq(&ndlp->lock);
2205
2206 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2207
2208 /* For PLOGI request, remainder of payload is service parameters */
2209 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2210 pcmd += sizeof(uint32_t);
2211 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2212 sp = (struct serv_parm *) pcmd;
2213
2214 /*
2215 	 * If we are an N_Port connected to a Fabric, fix up the parameters so
2216 	 * logins to devices on remote loops work.
2217 */
2218 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2219 sp->cmn.altBbCredit = 1;
2220
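	/* Advertise a minimum FC-PH version range in the common service
	 * parameters.
	 */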
2221 if (sp->cmn.fcphLow < FC_PH_4_3)
2222 sp->cmn.fcphLow = FC_PH_4_3;
2223
2224 if (sp->cmn.fcphHigh < FC_PH3)
2225 sp->cmn.fcphHigh = FC_PH3;
2226
2227 sp->cmn.valid_vendor_ver_level = 0;
2228 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2229 sp->cmn.bbRcvSizeMsb &= 0xF;
2230
2231 /* Check if the destination port supports VMID */
2232 ndlp->vmid_support = 0;
2233 if (vport->vmid_priority_tagging)
2234 sp->cmn.priority_tagging = 1;
2235 else if (phba->cfg_vmid_app_header &&
2236 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
2237 sp->cmn.app_hdr_support = 1;
2238
2239 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2240 "Issue PLOGI: did:x%x",
2241 did, 0, 0);
2242
2243 /* If our firmware supports this feature, convey that
2244 * information to the target using the vendor specific field.
2245 */
2246 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2247 sp->cmn.valid_vendor_ver_level = 1;
2248 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2249 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2250 }
2251
2252 phba->fc_stat.elsXmitPLOGI++;
2253 elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
2254
2255 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2256 "Issue PLOGI: did:x%x refcnt %d",
2257 did, kref_read(&ndlp->kref), 0);
2258 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2259 if (!elsiocb->ndlp) {
2260 lpfc_els_free_iocb(phba, elsiocb);
2261 return 1;
2262 }
2263
2264 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2265 if (ret) {
2266 lpfc_els_free_iocb(phba, elsiocb);
2267 lpfc_nlp_put(ndlp);
2268 return 1;
2269 }
2270
2271 return 0;
2272 }
2273
2274 /**
2275 * lpfc_cmpl_els_prli - Completion callback function for prli
2276 * @phba: pointer to lpfc hba data structure.
2277 * @cmdiocb: pointer to lpfc command iocb data structure.
2278 * @rspiocb: pointer to lpfc response iocb data structure.
2279 *
2280 * This routine is the completion callback function for a Process Login
2281 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2282 * status. If there is error status reported, PRLI retry shall be attempted
2283  * by invoking the lpfc_els_retry() routine. Otherwise, the event
2284  * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2285 * ndlp to mark the PRLI completion.
2286 **/
2287 static void
2288 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2289 struct lpfc_iocbq *rspiocb)
2290 {
2291 struct lpfc_vport *vport = cmdiocb->vport;
2292 struct lpfc_nodelist *ndlp;
2293 char *mode;
2294 u32 loglevel;
2295 u32 ulp_status;
2296 u32 ulp_word4;
2297 bool release_node = false;
2298
2299 /* we pass cmdiocb to state machine which needs rspiocb as well */
2300 cmdiocb->rsp_iocb = rspiocb;
2301
2302 ndlp = cmdiocb->ndlp;
2303
2304 ulp_status = get_job_ulpstatus(phba, rspiocb);
2305 ulp_word4 = get_job_word4(phba, rspiocb);
2306
2307 spin_lock_irq(&ndlp->lock);
2308 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2309
2310 /* Driver supports multiple FC4 types. Counters matter. */
2311 vport->fc_prli_sent--;
2312 ndlp->fc4_prli_sent--;
2313 spin_unlock_irq(&ndlp->lock);
2314
2315 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2316 "PRLI cmpl: status:x%x/x%x did:x%x",
2317 ulp_status, ulp_word4,
2318 ndlp->nlp_DID);
2319
2320 /* PRLI completes to NPort <nlp_DID> */
2321 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2322 "0103 PRLI completes to NPort x%06x "
2323 "Data: x%x x%x x%x x%x\n",
2324 ndlp->nlp_DID, ulp_status, ulp_word4,
2325 vport->num_disc_nodes, ndlp->fc4_prli_sent);
2326
2327 /* Check to see if link went down during discovery */
2328 if (lpfc_els_chk_latt(vport))
2329 goto out;
2330
2331 if (ulp_status) {
2332 /* Check for retry */
2333 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2334 /* ELS command is being retried */
2335 goto out;
2336 }
2337
2338 /* If we don't send GFT_ID to Fabric, a PRLI error
2339 * could be expected.
2340 */
2341 if ((vport->fc_flag & FC_FABRIC) ||
2342 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
2343 mode = KERN_ERR;
2344 loglevel = LOG_TRACE_EVENT;
2345 } else {
2346 mode = KERN_INFO;
2347 loglevel = LOG_ELS;
2348 }
2349
2350 /* PRLI failed */
2351 lpfc_printf_vlog(vport, mode, loglevel,
2352 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2353 "data: x%x\n",
2354 ndlp->nlp_DID, ulp_status,
2355 ulp_word4, ndlp->fc4_prli_sent);
2356
2357 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2358 if (!lpfc_error_lost_link(ulp_status, ulp_word4))
2359 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2360 NLP_EVT_CMPL_PRLI);
2361
2362 /*
2363 * For P2P topology, retain the node so that PLOGI can be
2364 * attempted on it again.
2365 */
2366 if (vport->fc_flag & FC_PT2PT)
2367 goto out;
2368
2369 /* As long as this node is not registered with the SCSI
2370 * or NVMe transport and no other PRLIs are outstanding,
2371 * it is no longer an active node. Otherwise devloss
2372 * handles the final cleanup.
2373 */
2374 spin_lock_irq(&ndlp->lock);
2375 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
2376 !ndlp->fc4_prli_sent) {
2377 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2378 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2379 release_node = true;
2380 }
2381 spin_unlock_irq(&ndlp->lock);
2382
2383 if (release_node)
2384 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2385 NLP_EVT_DEVICE_RM);
2386 } else {
2387 /* Good status, call state machine. However, if another
2388 * PRLI is outstanding, don't call the state machine
2389 * because final disposition to Mapped or Unmapped is
2390 * completed there.
2391 */
2392 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2393 NLP_EVT_CMPL_PRLI);
2394 }
2395
2396 out:
2397 lpfc_els_free_iocb(phba, cmdiocb);
2398 lpfc_nlp_put(ndlp);
2399 return;
2400 }
2401
2402 /**
2403 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2404 * @vport: pointer to a host virtual N_Port data structure.
2405 * @ndlp: pointer to a node-list data structure.
2406 * @retry: number of retries to the command IOCB.
2407 *
2408 * This routine issues a Process Login (PRLI) ELS command for the
2409 * @vport. The PRLI service parameters are set up in the payload of the
2410  * PRLI Request command and the pointer to the lpfc_cmpl_els_prli() routine
2411  * is put into the IOCB completion callback func field before invoking the
2412  * lpfc_sli_issue_iocb() routine to send out the PRLI command.
2413 *
2414 * Note that the ndlp reference count will be incremented by 1 for holding the
2415 * ndlp and the reference to ndlp will be stored into the ndlp field of
2416 * the IOCB for the completion callback function to the PRLI ELS command.
2417 *
2418 * Return code
2419 * 0 - successfully issued prli iocb command for @vport
2420 * 1 - failed to issue prli iocb command for @vport
2421 **/
2422 int
2423 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2424 uint8_t retry)
2425 {
2426 int rc = 0;
2427 struct lpfc_hba *phba = vport->phba;
2428 PRLI *npr;
2429 struct lpfc_nvme_prli *npr_nvme;
2430 struct lpfc_iocbq *elsiocb;
2431 uint8_t *pcmd;
2432 uint16_t cmdsize;
2433 u32 local_nlp_type, elscmd;
2434
2435 /*
2436 * If we are in RSCN mode, the FC4 types supported from a
2437 * previous GFT_ID command may not be accurate. So, if we
2438 	 * are an NVME Initiator, always look for the possibility of
2439 	 * the remote NPort being an NVME Target.
2440 */
2441 if (phba->sli_rev == LPFC_SLI_REV4 &&
2442 vport->fc_flag & FC_RSCN_MODE &&
2443 vport->nvmei_support)
2444 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
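	/* Work from a local copy of the FC4 type mask; each bit is cleared
	 * below as the corresponding PRLI is built, so the send_next_prli
	 * loop knows when both FCP and NVME have been handled.
	 */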
2445 local_nlp_type = ndlp->nlp_fc4_type;
2446
2447 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2448 * fields here before any of them can complete.
2449 */
2450 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2451 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2452 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2453 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2454 ndlp->nvme_fb_size = 0;
2455
2456 send_next_prli:
2457 if (local_nlp_type & NLP_FC4_FCP) {
2458 		/* Payload is 4 + 16 = 20 (0x14) bytes. */
2459 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2460 elscmd = ELS_CMD_PRLI;
2461 } else if (local_nlp_type & NLP_FC4_NVME) {
2462 		/* Payload is 4 + 20 = 24 (0x18) bytes. */
2463 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2464 elscmd = ELS_CMD_NVMEPRLI;
2465 } else {
2466 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2467 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2468 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2469 return 1;
2470 }
2471
2472 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2473 * FC4 type, implicitly LOGO.
2474 */
2475 if (phba->sli_rev == LPFC_SLI_REV3 &&
2476 ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2477 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2478 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2479 ndlp->nlp_type);
2480 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2481 return 1;
2482 }
2483
2484 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2485 ndlp->nlp_DID, elscmd);
2486 if (!elsiocb)
2487 return 1;
2488
2489 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2490
2491 /* For PRLI request, remainder of payload is service parameters */
2492 memset(pcmd, 0, cmdsize);
2493
2494 if (local_nlp_type & NLP_FC4_FCP) {
2495 /* Remainder of payload is FCP PRLI parameter page.
2496 * Note: this data structure is defined as
2497 * BE/LE in the structure definition so no
2498 * byte swap call is made.
2499 */
2500 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2501 pcmd += sizeof(uint32_t);
2502 npr = (PRLI *)pcmd;
2503
2504 /*
2505 * If our firmware version is 3.20 or later,
2506 * set the following bits for FC-TAPE support.
2507 */
2508 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2509 npr->ConfmComplAllowed = 1;
2510 npr->Retry = 1;
2511 npr->TaskRetryIdReq = 1;
2512 }
2513 npr->estabImagePair = 1;
2514 npr->readXferRdyDis = 1;
2515 if (vport->cfg_first_burst_size)
2516 npr->writeXferRdyDis = 1;
2517
2518 /* For FCP support */
2519 npr->prliType = PRLI_FCP_TYPE;
2520 npr->initiatorFunc = 1;
2521 elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
2522
2523 /* Remove FCP type - processed. */
2524 local_nlp_type &= ~NLP_FC4_FCP;
2525 } else if (local_nlp_type & NLP_FC4_NVME) {
2526 /* Remainder of payload is NVME PRLI parameter page.
2527 * This data structure is the newer definition that
2528 * uses bf macros so a byte swap is required.
2529 */
2530 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2531 pcmd += sizeof(uint32_t);
2532 npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2533 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2534 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
2535 if (phba->nsler) {
2536 bf_set(prli_nsler, npr_nvme, 1);
2537 bf_set(prli_conf, npr_nvme, 1);
2538 }
2539
2540 /* Only initiators request first burst. */
2541 if ((phba->cfg_nvme_enable_fb) &&
2542 !phba->nvmet_support)
2543 bf_set(prli_fba, npr_nvme, 1);
2544
2545 if (phba->nvmet_support) {
2546 bf_set(prli_tgt, npr_nvme, 1);
2547 bf_set(prli_disc, npr_nvme, 1);
2548 } else {
2549 bf_set(prli_init, npr_nvme, 1);
2550 bf_set(prli_conf, npr_nvme, 1);
2551 }
2552
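		/* The NVME PRLI page was built with bf macros in CPU order;
		 * swap to big-endian wire order before sending.
		 */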
2553 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2554 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2555 elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
2556
2557 /* Remove NVME type - processed. */
2558 local_nlp_type &= ~NLP_FC4_NVME;
2559 }
2560
2561 phba->fc_stat.elsXmitPRLI++;
2562 elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
2563
2564 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2565 "Issue PRLI: did:x%x refcnt %d",
2566 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2567 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2568 if (!elsiocb->ndlp) {
2569 lpfc_els_free_iocb(phba, elsiocb);
2570 return 1;
2571 }
2572
2573 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2574 if (rc == IOCB_ERROR) {
2575 lpfc_els_free_iocb(phba, elsiocb);
2576 lpfc_nlp_put(ndlp);
2577 return 1;
2578 }
2579
2580 /* The vport counters are used for lpfc_scan_finished, but
2581 * the ndlp is used to track outstanding PRLIs for different
2582 * FC4 types.
2583 */
2584 spin_lock_irq(&ndlp->lock);
2585 ndlp->nlp_flag |= NLP_PRLI_SND;
2586 vport->fc_prli_sent++;
2587 ndlp->fc4_prli_sent++;
2588 spin_unlock_irq(&ndlp->lock);
2589
2590 /* The driver supports 2 FC4 types. Make sure
2591 * a PRLI is issued for all types before exiting.
2592 */
2593 if (phba->sli_rev == LPFC_SLI_REV4 &&
2594 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2595 goto send_next_prli;
2596 else
2597 return 0;
2598 }
2599
2600 /**
2601 * lpfc_rscn_disc - Perform rscn discovery for a vport
2602 * @vport: pointer to a host virtual N_Port data structure.
2603 *
2604 * This routine performs Registration State Change Notification (RSCN)
2605 * discovery for a @vport. If the @vport's node port recovery count is not
2606 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2607  * the nodes that need recovery. If none of the PLOGIs were needed through
2608  * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2609  * invoked to check and handle any additional RSCNs that came in while the
2610  * current ones were being processed.
2611 **/
2612 static void
2613 lpfc_rscn_disc(struct lpfc_vport *vport)
2614 {
2615 lpfc_can_disctmo(vport);
2616
2617 /* RSCN discovery */
2618 /* go thru NPR nodes and issue ELS PLOGIs */
2619 if (vport->fc_npr_cnt)
2620 if (lpfc_els_disc_plogi(vport))
2621 return;
2622
2623 lpfc_end_rscn(vport);
2624 }
2625
2626 /**
2627 * lpfc_adisc_done - Complete the adisc phase of discovery
2628 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2629 *
2630 * This function is called when the final ADISC is completed during discovery.
2631 * This function handles clearing link attention or issuing reg_vpi depending
2632 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2633 * discovery.
2634 * This function is called with no locks held.
2635 **/
2636 static void
2637 lpfc_adisc_done(struct lpfc_vport *vport)
2638 {
2639 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2640 struct lpfc_hba *phba = vport->phba;
2641
2642 /*
2643 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2644 * and continue discovery.
2645 */
2646 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2647 !(vport->fc_flag & FC_RSCN_MODE) &&
2648 (phba->sli_rev < LPFC_SLI_REV4)) {
2649
2650 /*
2651 * If link is down, clear_la and reg_vpi will be done after
2652 * flogi following a link up event
2653 */
2654 if (!lpfc_is_link_up(phba))
2655 return;
2656
2657 /* The ADISCs are complete. Doesn't matter if they
2658 * succeeded or failed because the ADISC completion
2659 * routine guarantees to call the state machine and
2660 * the RPI is either unregistered (failed ADISC response)
2661 * or the RPI is still valid and the node is marked
2662 * mapped for a target. The exchanges should be in the
2663 * correct state. This code is specific to SLI3.
2664 */
2665 lpfc_issue_clear_la(phba, vport);
2666 lpfc_issue_reg_vpi(phba, vport);
2667 return;
2668 }
2669 /*
2670 * For SLI2, we need to set port_state to READY
2671 * and continue discovery.
2672 */
2673 if (vport->port_state < LPFC_VPORT_READY) {
2674 /* If we get here, there is nothing to ADISC */
2675 lpfc_issue_clear_la(phba, vport);
2676 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2677 vport->num_disc_nodes = 0;
2678 /* go thru NPR list, issue ELS PLOGIs */
2679 if (vport->fc_npr_cnt)
2680 lpfc_els_disc_plogi(vport);
2681 if (!vport->num_disc_nodes) {
2682 spin_lock_irq(shost->host_lock);
2683 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2684 spin_unlock_irq(shost->host_lock);
2685 lpfc_can_disctmo(vport);
2686 lpfc_end_rscn(vport);
2687 }
2688 }
2689 vport->port_state = LPFC_VPORT_READY;
2690 } else
2691 lpfc_rscn_disc(vport);
2692 }
2693
2694 /**
2695 * lpfc_more_adisc - Issue more adisc as needed
2696 * @vport: pointer to a host virtual N_Port data structure.
2697 *
2698  * This routine determines whether there are more ndlps on a @vport's
2699  * node list that need to have Address Discover (ADISC) issued. If so, it will
2700 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2701 * remaining nodes which need to have ADISC sent.
2702 **/
2703 void
2704 lpfc_more_adisc(struct lpfc_vport *vport)
2705 {
2706 if (vport->num_disc_nodes)
2707 vport->num_disc_nodes--;
2708 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2709 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2710 "0210 Continue discovery with %d ADISCs to go "
2711 "Data: x%x x%x x%x\n",
2712 vport->num_disc_nodes, vport->fc_adisc_cnt,
2713 vport->fc_flag, vport->port_state);
2714 /* Check to see if there are more ADISCs to be sent */
2715 if (vport->fc_flag & FC_NLP_MORE) {
2716 lpfc_set_disctmo(vport);
2717 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2718 lpfc_els_disc_adisc(vport);
2719 }
2720 if (!vport->num_disc_nodes)
2721 lpfc_adisc_done(vport);
2722 return;
2723 }
2724
2725 /**
2726 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2727 * @phba: pointer to lpfc hba data structure.
2728 * @cmdiocb: pointer to lpfc command iocb data structure.
2729 * @rspiocb: pointer to lpfc response iocb data structure.
2730 *
2731 * This routine is the completion function for issuing the Address Discover
2732  * (ADISC) command. It first checks whether the link went down during the
2733  * discovery process. If so, the node is marked for node port recovery so
2734  * that discovery can be reissued by the link attention handler, and the
2735  * routine exits. Otherwise, the response status is checked. If an error was
2736  * reported in the response status, the ADISC command shall be retried by
2737  * invoking the lpfc_els_retry() routine. Otherwise, if no error was reported
2738  * in the response status, the state machine is invoked to transition with
2739  * respect to the NLP_EVT_CMPL_ADISC event.
2740 **/
2741 static void
2742 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2743 struct lpfc_iocbq *rspiocb)
2744 {
2745 struct lpfc_vport *vport = cmdiocb->vport;
2746 IOCB_t *irsp;
2747 struct lpfc_nodelist *ndlp;
2748 int disc;
2749 u32 ulp_status, ulp_word4, tmo;
2750 bool release_node = false;
2751
2752 /* we pass cmdiocb to state machine which needs rspiocb as well */
2753 cmdiocb->rsp_iocb = rspiocb;
2754
2755 ndlp = cmdiocb->ndlp;
2756
2757 ulp_status = get_job_ulpstatus(phba, rspiocb);
2758 ulp_word4 = get_job_word4(phba, rspiocb);
2759
2760 if (phba->sli_rev == LPFC_SLI_REV4) {
2761 tmo = get_wqe_tmo(cmdiocb);
2762 } else {
2763 irsp = &rspiocb->iocb;
2764 tmo = irsp->ulpTimeout;
2765 }
2766
2767 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2768 "ADISC cmpl: status:x%x/x%x did:x%x",
2769 ulp_status, ulp_word4,
2770 ndlp->nlp_DID);
2771
2772 /* Since ndlp can be freed in the disc state machine, note if this node
2773 * is being used during discovery.
2774 */
2775 spin_lock_irq(&ndlp->lock);
2776 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2777 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2778 spin_unlock_irq(&ndlp->lock);
2779 /* ADISC completes to NPort <nlp_DID> */
2780 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2781 "0104 ADISC completes to NPort x%x "
2782 "Data: x%x x%x x%x x%x x%x\n",
2783 ndlp->nlp_DID, ulp_status, ulp_word4,
2784 tmo, disc, vport->num_disc_nodes);
2785 /* Check to see if link went down during discovery */
2786 if (lpfc_els_chk_latt(vport)) {
2787 spin_lock_irq(&ndlp->lock);
2788 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2789 spin_unlock_irq(&ndlp->lock);
2790 goto out;
2791 }
2792
2793 if (ulp_status) {
2794 /* Check for retry */
2795 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2796 /* ELS command is being retried */
2797 if (disc) {
2798 spin_lock_irq(&ndlp->lock);
2799 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2800 spin_unlock_irq(&ndlp->lock);
2801 lpfc_set_disctmo(vport);
2802 }
2803 goto out;
2804 }
2805 /* ADISC failed */
2806 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2807 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2808 ndlp->nlp_DID, ulp_status,
2809 ulp_word4);
2810 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2811 NLP_EVT_CMPL_ADISC);
2812
2813 /* As long as this node is not registered with the SCSI or NVMe
2814 * transport, it is no longer an active node. Otherwise
2815 * devloss handles the final cleanup.
2816 */
2817 spin_lock_irq(&ndlp->lock);
2818 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2819 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2820 if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
2821 release_node = true;
2822 }
2823 spin_unlock_irq(&ndlp->lock);
2824
2825 if (release_node)
2826 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2827 NLP_EVT_DEVICE_RM);
2828 } else
2829 /* Good status, call state machine */
2830 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2831 NLP_EVT_CMPL_ADISC);
2832
2833 /* Check to see if there are more ADISCs to be sent */
2834 if (disc && vport->num_disc_nodes)
2835 lpfc_more_adisc(vport);
2836 out:
2837 lpfc_els_free_iocb(phba, cmdiocb);
2838 lpfc_nlp_put(ndlp);
2839 return;
2840 }
2841
2842 /**
2843  * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2844 * @vport: pointer to a virtual N_Port data structure.
2845 * @ndlp: pointer to a node-list data structure.
2846 * @retry: number of retries to the command IOCB.
2847 *
2848 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2849  * @vport. It prepares the payload of the ADISC ELS command, updates the
2850  * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2851 * to issue the ADISC ELS command.
2852 *
2853 * Note that the ndlp reference count will be incremented by 1 for holding the
2854 * ndlp and the reference to ndlp will be stored into the ndlp field of
2855 * the IOCB for the completion callback function to the ADISC ELS command.
2856 *
2857 * Return code
2858 * 0 - successfully issued adisc
2859 * 1 - failed to issue adisc
2860 **/
2861 int
2862 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2863 uint8_t retry)
2864 {
2865 int rc = 0;
2866 struct lpfc_hba *phba = vport->phba;
2867 ADISC *ap;
2868 struct lpfc_iocbq *elsiocb;
2869 uint8_t *pcmd;
2870 uint16_t cmdsize;
2871
2872 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2873 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2874 ndlp->nlp_DID, ELS_CMD_ADISC);
2875 if (!elsiocb)
2876 return 1;
2877
2878 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
2879
2880 /* For ADISC request, remainder of payload is service parameters */
2881 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2882 pcmd += sizeof(uint32_t);
2883
2884 /* Fill in ADISC payload */
2885 ap = (ADISC *) pcmd;
2886 ap->hardAL_PA = phba->fc_pref_ALPA;
2887 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2888 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2889 ap->DID = be32_to_cpu(vport->fc_myDID);
2890
2891 phba->fc_stat.elsXmitADISC++;
2892 elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
2893 spin_lock_irq(&ndlp->lock);
2894 ndlp->nlp_flag |= NLP_ADISC_SND;
2895 spin_unlock_irq(&ndlp->lock);
2896 elsiocb->ndlp = lpfc_nlp_get(ndlp);
2897 if (!elsiocb->ndlp) {
2898 lpfc_els_free_iocb(phba, elsiocb);
2899 goto err;
2900 }
2901
2902 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2903 "Issue ADISC: did:x%x refcnt %d",
2904 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2905
2906 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2907 if (rc == IOCB_ERROR) {
2908 lpfc_els_free_iocb(phba, elsiocb);
2909 lpfc_nlp_put(ndlp);
2910 goto err;
2911 }
2912
2913 return 0;
2914
2915 err:
2916 spin_lock_irq(&ndlp->lock);
2917 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2918 spin_unlock_irq(&ndlp->lock);
2919 return 1;
2920 }
2921
2922 /**
2923 * lpfc_cmpl_els_logo - Completion callback function for logo
2924 * @phba: pointer to lpfc hba data structure.
2925 * @cmdiocb: pointer to lpfc command iocb data structure.
2926 * @rspiocb: pointer to lpfc response iocb data structure.
2927 *
2928 * This routine is the completion function for issuing the ELS Logout (LOGO)
2929 * command. If no error status was reported from the LOGO response, the
2930 * state machine of the associated ndlp shall be invoked for transition with
2931 * respect to NLP_EVT_CMPL_LOGO event.
2932 **/
2933 static void
2934 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2935 struct lpfc_iocbq *rspiocb)
2936 {
2937 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
2938 struct lpfc_vport *vport = ndlp->vport;
2939 IOCB_t *irsp;
2940 unsigned long flags;
2941 uint32_t skip_recovery = 0;
2942 int wake_up_waiter = 0;
2943 u32 ulp_status;
2944 u32 ulp_word4;
2945 u32 tmo;
2946
2947 /* we pass cmdiocb to state machine which needs rspiocb as well */
2948 cmdiocb->rsp_iocb = rspiocb;
2949
2950 ulp_status = get_job_ulpstatus(phba, rspiocb);
2951 ulp_word4 = get_job_word4(phba, rspiocb);
2952
2953 if (phba->sli_rev == LPFC_SLI_REV4) {
2954 tmo = get_wqe_tmo(cmdiocb);
2955 } else {
2956 irsp = &rspiocb->iocb;
2957 tmo = irsp->ulpTimeout;
2958 }
2959
2960 spin_lock_irq(&ndlp->lock);
2961 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2962 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
2963 wake_up_waiter = 1;
2964 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
2965 }
2966 spin_unlock_irq(&ndlp->lock);
2967
2968 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2969 "LOGO cmpl: status:x%x/x%x did:x%x",
2970 ulp_status, ulp_word4,
2971 ndlp->nlp_DID);
2972
2973 /* LOGO completes to NPort <nlp_DID> */
2974 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2975 "0105 LOGO completes to NPort x%x "
2976 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
2977 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
2978 ulp_status, ulp_word4,
2979 tmo, vport->num_disc_nodes);
2980
2981 if (lpfc_els_chk_latt(vport)) {
2982 skip_recovery = 1;
2983 goto out;
2984 }
2985
2986 /* The LOGO will not be retried on failure. A LOGO was
2987 	 * issued to the remote rport and an ACC, an RJT, or no answer is
2988 	 * acceptable. Note the failure and move forward with
2989 * discovery. The PLOGI will retry.
2990 */
2991 if (ulp_status) {
2992 /* LOGO failed */
2993 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2994 "2756 LOGO failure, No Retry DID:%06X "
2995 "Status:x%x/x%x\n",
2996 ndlp->nlp_DID, ulp_status,
2997 ulp_word4);
2998
2999 if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
3000 skip_recovery = 1;
3001 goto out;
3002 }
3003 }
3004
3005 /* Call state machine. This will unregister the rpi if needed. */
3006 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
3007
3008 /* The driver sets this flag for an NPIV instance that doesn't want to
3009 * log into the remote port.
3010 */
3011 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
3012 spin_lock_irq(&ndlp->lock);
3013 if (phba->sli_rev == LPFC_SLI_REV4)
3014 ndlp->nlp_flag |= NLP_RELEASE_RPI;
3015 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3016 spin_unlock_irq(&ndlp->lock);
3017 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3018 NLP_EVT_DEVICE_RM);
3019 goto out_rsrc_free;
3020 }
3021
3022 out:
3023 /* At this point, the LOGO processing is complete. NOTE: For a
3024 * pt2pt topology, we are assuming the NPortID will only change
3025 * on link up processing. For a LOGO / PLOGI initiated by the
3026 * Initiator, we are assuming the NPortID is not going to change.
3027 */
3028
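	/* A synchronous LOGO issuer may be sleeping on logo_waitq; wake it
	 * now that the LOGO has completed.
	 */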
3029 if (wake_up_waiter && ndlp->logo_waitq)
3030 wake_up(ndlp->logo_waitq);
3031 /*
3032 * If the node is a target, the handling attempts to recover the port.
3033 * For any other port type, the rpi is unregistered as an implicit
3034 * LOGO.
3035 */
3036 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
3037 skip_recovery == 0) {
3038 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3039 spin_lock_irqsave(&ndlp->lock, flags);
3040 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
3041 spin_unlock_irqrestore(&ndlp->lock, flags);
3042
3043 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3044 "3187 LOGO completes to NPort x%x: Start "
3045 "Recovery Data: x%x x%x x%x x%x\n",
3046 ndlp->nlp_DID, ulp_status,
3047 ulp_word4, tmo,
3048 vport->num_disc_nodes);
3049
3050 lpfc_els_free_iocb(phba, cmdiocb);
3051 lpfc_nlp_put(ndlp);
3052
3053 lpfc_disc_start(vport);
3054 return;
3055 }
3056
3057 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
3058 	 * driver sends a LOGO to the rport to clean up. For fabric and
3059 	 * initiator ports, clean up the node as long as the node is not
3060 	 * registered with the transport.
3061 */
3062 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
3063 spin_lock_irq(&ndlp->lock);
3064 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3065 spin_unlock_irq(&ndlp->lock);
3066 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3067 NLP_EVT_DEVICE_RM);
3068 }
3069 out_rsrc_free:
3070 /* Driver is done with the I/O. */
3071 lpfc_els_free_iocb(phba, cmdiocb);
3072 lpfc_nlp_put(ndlp);
3073 }
3074
3075 /**
3076  * lpfc_issue_els_logo - Issue a logo to a node on a vport
3077 * @vport: pointer to a virtual N_Port data structure.
3078 * @ndlp: pointer to a node-list data structure.
3079 * @retry: number of retries to the command IOCB.
3080 *
3081 * This routine constructs and issues an ELS Logout (LOGO) iocb command
3082  * to a remote node, referred to by an @ndlp on a @vport. It constructs the
3083 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3084 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3085 *
3086 * Note that the ndlp reference count will be incremented by 1 for holding the
3087 * ndlp and the reference to ndlp will be stored into the ndlp field of
3088 * the IOCB for the completion callback function to the LOGO ELS command.
3089 *
3090 * Callers of this routine are expected to unregister the RPI first
3091 *
3092 * Return code
3093 * 0 - successfully issued logo
3094 * 1 - failed to issue logo
3095 **/
3096 int
3097 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3098 uint8_t retry)
3099 {
3100 struct lpfc_hba *phba = vport->phba;
3101 struct lpfc_iocbq *elsiocb;
3102 uint8_t *pcmd;
3103 uint16_t cmdsize;
3104 int rc;
3105
3106 spin_lock_irq(&ndlp->lock);
3107 if (ndlp->nlp_flag & NLP_LOGO_SND) {
3108 spin_unlock_irq(&ndlp->lock);
3109 return 0;
3110 }
3111 spin_unlock_irq(&ndlp->lock);
3112
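	/* LOGO payload: 4-byte ELS command code, the requester's N_Port ID,
	 * and its WWPN.
	 */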
3113 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
3114 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3115 ndlp->nlp_DID, ELS_CMD_LOGO);
3116 if (!elsiocb)
3117 return 1;
3118
3119 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3120 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
3121 pcmd += sizeof(uint32_t);
3122
3123 /* Fill in LOGO payload */
3124 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
3125 pcmd += sizeof(uint32_t);
3126 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
3127
3128 phba->fc_stat.elsXmitLOGO++;
3129 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
3130 spin_lock_irq(&ndlp->lock);
3131 ndlp->nlp_flag |= NLP_LOGO_SND;
3132 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
3133 spin_unlock_irq(&ndlp->lock);
3134 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3135 if (!elsiocb->ndlp) {
3136 lpfc_els_free_iocb(phba, elsiocb);
3137 goto err;
3138 }
3139
3140 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3141 "Issue LOGO: did:x%x refcnt %d",
3142 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3143
3144 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3145 if (rc == IOCB_ERROR) {
3146 lpfc_els_free_iocb(phba, elsiocb);
3147 lpfc_nlp_put(ndlp);
3148 goto err;
3149 }
3150
3151 spin_lock_irq(&ndlp->lock);
3152 ndlp->nlp_prev_state = ndlp->nlp_state;
3153 spin_unlock_irq(&ndlp->lock);
3154 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3155 return 0;
3156
3157 err:
3158 spin_lock_irq(&ndlp->lock);
3159 ndlp->nlp_flag &= ~NLP_LOGO_SND;
3160 spin_unlock_irq(&ndlp->lock);
3161 return 1;
3162 }
3163
3164 /**
3165 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3166 * @phba: pointer to lpfc hba data structure.
3167 * @cmdiocb: pointer to lpfc command iocb data structure.
3168 * @rspiocb: pointer to lpfc response iocb data structure.
3169 *
3170 * This routine is a generic completion callback function for ELS commands.
3171 * Specifically, it is the callback function which does not need to perform
3172 * any command specific operations. It is currently used by the ELS command
3173 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3174 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3175  * Other than certain debug logging, this callback function simply invokes the
3176 * lpfc_els_chk_latt() routine to check whether link went down during the
3177 * discovery process.
3178 **/
3179 static void
3180 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3181 struct lpfc_iocbq *rspiocb)
3182 {
3183 struct lpfc_vport *vport = cmdiocb->vport;
3184 struct lpfc_nodelist *free_ndlp;
3185 IOCB_t *irsp;
3186 u32 ulp_status, ulp_word4, tmo, did, iotag;
3187
3188 ulp_status = get_job_ulpstatus(phba, rspiocb);
3189 ulp_word4 = get_job_word4(phba, rspiocb);
3190 did = get_job_els_rsp64_did(phba, cmdiocb);
3191
3192 if (phba->sli_rev == LPFC_SLI_REV4) {
3193 tmo = get_wqe_tmo(cmdiocb);
3194 iotag = get_wqe_reqtag(cmdiocb);
3195 } else {
3196 irsp = &rspiocb->iocb;
3197 tmo = irsp->ulpTimeout;
3198 iotag = irsp->ulpIoTag;
3199 }
3200
3201 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3202 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3203 ulp_status, ulp_word4, did);
3204
3205 /* ELS cmd tag <ulpIoTag> completes */
3206 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3207 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3208 iotag, ulp_status, ulp_word4, tmo);
3209
3210 /* Check to see if link went down during discovery */
3211 lpfc_els_chk_latt(vport);
3212
3213 free_ndlp = cmdiocb->ndlp;
3214
3215 lpfc_els_free_iocb(phba, cmdiocb);
3216 lpfc_nlp_put(free_ndlp);
3217 }
3218
3219 /**
3220 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
3221 * @vport: pointer to lpfc_vport data structure.
3222 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
3223 *
3224 * This routine registers the rpi assigned to the fabric controller
3225 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
3226 * state triggering a registration with the SCSI transport.
3227 *
3228  * This routine is singled out because the fabric controller node
3229 * does not receive a PLOGI. This routine is consumed by the
3230 * SCR and RDF ELS commands. Callers are expected to qualify
3231 * with SLI4 first.
3232 **/
3233 static int
3234 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
3235 {
3236 int rc = 0;
3237 struct lpfc_hba *phba = vport->phba;
3238 struct lpfc_nodelist *ns_ndlp;
3239 LPFC_MBOXQ_t *mbox;
3240
3241 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
3242 return rc;
3243
3244 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
3245 if (!ns_ndlp)
3246 return -ENODEV;
3247
3248 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3249 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
3250 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
3251 ns_ndlp->nlp_state);
3252 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
3253 return -ENODEV;
3254
3255 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3256 if (!mbox) {
3257 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3258 "0936 %s: no memory for reg_login "
3259 "Data: x%x x%x x%x x%x\n", __func__,
3260 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3261 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3262 return -ENOMEM;
3263 }
3264 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
3265 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
3266 if (rc) {
3267 rc = -EACCES;
3268 goto out;
3269 }
3270
3271 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
3272 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
3273 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
3274 if (!mbox->ctx_ndlp) {
3275 rc = -ENOMEM;
3276 goto out;
3277 }
3278
3279 mbox->vport = vport;
3280 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3281 if (rc == MBX_NOT_FINISHED) {
3282 rc = -ENODEV;
3283 lpfc_nlp_put(fc_ndlp);
3284 goto out;
3285 }
3286 /* Success path. Exit. */
3287 lpfc_nlp_set_state(vport, fc_ndlp,
3288 NLP_STE_REG_LOGIN_ISSUE);
3289 return 0;
3290
3291 out:
3292 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
3293 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3294 "0938 %s: failed to format reg_login "
3295 "Data: x%x x%x x%x x%x\n", __func__,
3296 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3297 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3298 return rc;
3299 }
3300
3301 /**
3302 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
3303 * @phba: pointer to lpfc hba data structure.
3304 * @cmdiocb: pointer to lpfc command iocb data structure.
3305 * @rspiocb: pointer to lpfc response iocb data structure.
3306 *
3307 * This routine is a generic completion callback function for Discovery ELS cmd.
3308 * Currently used by the ELS command issuing routines for the ELS State Change
3309  * Request (SCR), lpfc_issue_els_scr(), the ELS RDF, lpfc_issue_els_rdf(), and
3310  * the ELS EDC, lpfc_issue_els_edc(). These commands are retried only once for ELS timeout errors.
3311 **/
3312 static void
3313 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3314 struct lpfc_iocbq *rspiocb)
3315 {
3316 struct lpfc_vport *vport = cmdiocb->vport;
3317 IOCB_t *irsp;
3318 struct lpfc_els_rdf_rsp *prdf;
3319 struct lpfc_dmabuf *pcmd, *prsp;
3320 u32 *pdata;
3321 u32 cmd;
3322 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
3323 u32 ulp_status, ulp_word4, tmo, did, iotag;
3324
3325 ulp_status = get_job_ulpstatus(phba, rspiocb);
3326 ulp_word4 = get_job_word4(phba, rspiocb);
3327 did = get_job_els_rsp64_did(phba, cmdiocb);
3328
3329 if (phba->sli_rev == LPFC_SLI_REV4) {
3330 tmo = get_wqe_tmo(cmdiocb);
3331 iotag = get_wqe_reqtag(cmdiocb);
3332 } else {
3333 irsp = &rspiocb->iocb;
3334 tmo = irsp->ulpTimeout;
3335 iotag = irsp->ulpIoTag;
3336 }
3337
3338 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3339 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3340 ulp_status, ulp_word4, did);
3341
3342 /* ELS cmd tag <ulpIoTag> completes */
3343 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3344 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
3345 iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
3346
3347 pcmd = cmdiocb->cmd_dmabuf;
3348 if (!pcmd)
3349 goto out;
3350
3351 pdata = (u32 *)pcmd->virt;
3352 if (!pdata)
3353 goto out;
3354 cmd = *pdata;
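	/* The first payload word is the original ELS command code, used to
	 * select the retry path below.
	 */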
3355
3356 	/* Only 1 retry for ELS timeout errors */
3357 if (ulp_status == IOSTAT_LOCAL_REJECT &&
3358 ((ulp_word4 & IOERR_PARAM_MASK) ==
3359 IOERR_SEQUENCE_TIMEOUT)) {
3360 cmdiocb->retry++;
3361 if (cmdiocb->retry <= 1) {
3362 switch (cmd) {
3363 case ELS_CMD_SCR:
3364 lpfc_issue_els_scr(vport, cmdiocb->retry);
3365 break;
3366 case ELS_CMD_EDC:
3367 lpfc_issue_els_edc(vport, cmdiocb->retry);
3368 break;
3369 case ELS_CMD_RDF:
3370 lpfc_issue_els_rdf(vport, cmdiocb->retry);
3371 break;
3372 }
3373 goto out;
3374 }
3375 phba->fc_stat.elsRetryExceeded++;
3376 }
3377 if (cmd == ELS_CMD_EDC) {
3378 		/* must be called before checking ulp_status and returning */
3379 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
3380 return;
3381 }
3382 if (ulp_status) {
3383 /* ELS discovery cmd completes with error */
3384 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
3385 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
3386 ulp_status, ulp_word4);
3387 goto out;
3388 }
3389
3390 /* The RDF response doesn't have any impact on the running driver
3391 * but the notification descriptors are dumped here for support.
3392 */
3393 if (cmd == ELS_CMD_RDF) {
3394 int i;
3395
3396 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3397 if (!prsp)
3398 goto out;
3399
3400 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3401 if (!prdf)
3402 goto out;
3403
3404 for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3405 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3406 lpfc_printf_vlog(vport, KERN_INFO,
3407 LOG_ELS | LOG_CGN_MGMT,
3408 "4677 Fabric RDF Notification Grant "
3409 "Data: 0x%08x Reg: %x %x\n",
3410 be32_to_cpu(
3411 prdf->reg_d1.desc_tags[i]),
3412 phba->cgn_reg_signal,
3413 phba->cgn_reg_fpin);
3414 }
3415
3416 out:
3417 /* Check to see if link went down during discovery */
3418 lpfc_els_chk_latt(vport);
3419 lpfc_els_free_iocb(phba, cmdiocb);
3420 lpfc_nlp_put(ndlp);
3421 return;
3422 }
3423
3424 /**
3425  * lpfc_issue_els_scr - Issue an SCR to a node on a vport
3426 * @vport: pointer to a host virtual N_Port data structure.
3427 * @retry: retry counter for the command IOCB.
3428 *
3429 * This routine issues a State Change Request (SCR) to a fabric node
3430 * on a @vport. The remote node is Fabric Controller (0xfffffd). It
3431 * first search the @vport node list to find the matching ndlp. If no such
3432 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3433 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3434 * routine is invoked to send the SCR IOCB.
3435 *
3436 * Note that the ndlp reference count will be incremented by 1 for holding the
3437 * ndlp and the reference to ndlp will be stored into the ndlp field of
3438 * the IOCB for the completion callback function to the SCR ELS command.
3439 *
3440 * Return code
3441 * 0 - Successfully issued scr command
3442 * 1 - Failed to issue scr command
3443 **/
3444 int
3445 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
3446 {
3447 int rc = 0;
3448 struct lpfc_hba *phba = vport->phba;
3449 struct lpfc_iocbq *elsiocb;
3450 uint8_t *pcmd;
3451 uint16_t cmdsize;
3452 struct lpfc_nodelist *ndlp;
3453
3454 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
3455
3456 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3457 if (!ndlp) {
3458 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3459 if (!ndlp)
3460 return 1;
3461 lpfc_enqueue_node(vport, ndlp);
3462 }
3463
3464 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3465 ndlp->nlp_DID, ELS_CMD_SCR);
3466 if (!elsiocb)
3467 return 1;
3468
3469 if (phba->sli_rev == LPFC_SLI_REV4) {
3470 rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
3471 if (rc) {
3472 lpfc_els_free_iocb(phba, elsiocb);
3473 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3474 "0937 %s: Failed to reg fc node, rc %d\n",
3475 __func__, rc);
3476 return 1;
3477 }
3478 }
3479 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3480
3481 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3482 pcmd += sizeof(uint32_t);
3483
3484 /* For SCR, remainder of payload is SCR parameter page */
3485 memset(pcmd, 0, sizeof(SCR));
3486 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
3487
3488 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3489 "Issue SCR: did:x%x",
3490 ndlp->nlp_DID, 0, 0);
3491
3492 phba->fc_stat.elsXmitSCR++;
3493 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3494 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3495 if (!elsiocb->ndlp) {
3496 lpfc_els_free_iocb(phba, elsiocb);
3497 return 1;
3498 }
3499
3500 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3501 "Issue SCR: did:x%x refcnt %d",
3502 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3503
3504 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3505 if (rc == IOCB_ERROR) {
3506 lpfc_els_free_iocb(phba, elsiocb);
3507 lpfc_nlp_put(ndlp);
3508 return 1;
3509 }
3510
3511 return 0;
3512 }
3513
3514 /**
3515 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3516 * or the other nport (pt2pt).
3517 * @vport: pointer to a host virtual N_Port data structure.
3518 * @retry: number of retries to the command IOCB.
3519 *
3520 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3521 * when connected to a fabric, or to the remote port when connected
3522 * in point-to-point mode. When sent to the Fabric Controller, it will
3523 * replay the RSCN to registered recipients.
3524 *
3525 * Note that the ndlp reference count will be incremented by 1 for holding the
3526 * ndlp and the reference to ndlp will be stored into the ndlp field of
3527 * the IOCB for the completion callback function to the RSCN ELS command.
3528 *
3529 * Return code
3530 * 0 - Successfully issued RSCN command
3531 * 1 - Failed to issue RSCN command
3532 **/
3533 int
3534 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3535 {
3536 int rc = 0;
3537 struct lpfc_hba *phba = vport->phba;
3538 struct lpfc_iocbq *elsiocb;
3539 struct lpfc_nodelist *ndlp;
3540 struct {
3541 struct fc_els_rscn rscn;
3542 struct fc_els_rscn_page portid;
3543 } *event;
3544 uint32_t nportid;
3545 uint16_t cmdsize = sizeof(*event);
3546
3547 /* Not supported for private loop */
3548 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3549 !(vport->fc_flag & FC_PUBLIC_LOOP))
3550 return 1;
3551
3552 if (vport->fc_flag & FC_PT2PT) {
3553 /* find any mapped nport - that would be the other nport */
3554 ndlp = lpfc_findnode_mapped(vport);
3555 if (!ndlp)
3556 return 1;
3557 } else {
3558 nportid = FC_FID_FCTRL;
3559 /* find the fabric controller node */
3560 ndlp = lpfc_findnode_did(vport, nportid);
3561 if (!ndlp) {
3562 /* if one didn't exist, make one */
3563 ndlp = lpfc_nlp_init(vport, nportid);
3564 if (!ndlp)
3565 return 1;
3566 lpfc_enqueue_node(vport, ndlp);
3567 }
3568 }
3569
3570 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3571 ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3572
3573 if (!elsiocb)
3574 return 1;
3575
3576 event = elsiocb->cmd_dmabuf->virt;
3577
3578 event->rscn.rscn_cmd = ELS_RSCN;
3579 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3580 event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3581
3582 nportid = vport->fc_myDID;
3583 /* appears that page flags must be 0 for fabric to broadcast RSCN */
3584 event->portid.rscn_page_flags = 0;
3585 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3586 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3587 event->portid.rscn_fid[2] = nportid & 0x000000FF;
3588
3589 phba->fc_stat.elsXmitRSCN++;
3590 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3591 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3592 if (!elsiocb->ndlp) {
3593 lpfc_els_free_iocb(phba, elsiocb);
3594 return 1;
3595 }
3596
3597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3598 "Issue RSCN: did:x%x",
3599 ndlp->nlp_DID, 0, 0);
3600
3601 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3602 if (rc == IOCB_ERROR) {
3603 lpfc_els_free_iocb(phba, elsiocb);
3604 lpfc_nlp_put(ndlp);
3605 return 1;
3606 }
3607
3608 return 0;
3609 }
3610
3611 /**
3612  * lpfc_issue_els_farpr - Issue a FARPR to a node on a vport
3613 * @vport: pointer to a host virtual N_Port data structure.
3614 * @nportid: N_Port identifier to the remote node.
3615 * @retry: number of retries to the command IOCB.
3616 *
3617 * This routine issues a Fibre Channel Address Resolution Response
3618 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3619  * is passed into the function. It first searches the @vport node list to find
3620 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3621 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3622 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3623 *
3624 * Note that the ndlp reference count will be incremented by 1 for holding the
3625 * ndlp and the reference to ndlp will be stored into the ndlp field of
3626 * the IOCB for the completion callback function to the FARPR ELS command.
3627 *
3628 * Return code
3629 * 0 - Successfully issued farpr command
3630 * 1 - Failed to issue farpr command
3631 **/
3632 static int
3633 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3634 {
3635 int rc = 0;
3636 struct lpfc_hba *phba = vport->phba;
3637 struct lpfc_iocbq *elsiocb;
3638 FARP *fp;
3639 uint8_t *pcmd;
3640 uint32_t *lp;
3641 uint16_t cmdsize;
3642 struct lpfc_nodelist *ondlp;
3643 struct lpfc_nodelist *ndlp;
3644
3645 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3646
3647 ndlp = lpfc_findnode_did(vport, nportid);
3648 if (!ndlp) {
3649 ndlp = lpfc_nlp_init(vport, nportid);
3650 if (!ndlp)
3651 return 1;
3652 lpfc_enqueue_node(vport, ndlp);
3653 }
3654
3655 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3656 ndlp->nlp_DID, ELS_CMD_FARPR);
3657 if (!elsiocb)
3658 return 1;
3659
3660 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
3661
3662 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3663 pcmd += sizeof(uint32_t);
3664
3665 /* Fill in FARPR payload */
3666 fp = (FARP *) (pcmd);
3667 memset(fp, 0, sizeof(FARP));
3668 lp = (uint32_t *) pcmd;
3669 *lp++ = be32_to_cpu(nportid);
3670 *lp++ = be32_to_cpu(vport->fc_myDID);
3671 fp->Rflags = 0;
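	/* Request a match on both the remote port name and node name */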
3672 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3673
3674 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3675 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3676 ondlp = lpfc_findnode_did(vport, nportid);
3677 if (ondlp) {
3678 memcpy(&fp->OportName, &ondlp->nlp_portname,
3679 sizeof(struct lpfc_name));
3680 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3681 sizeof(struct lpfc_name));
3682 }
3683
3684 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3685 "Issue FARPR: did:x%x",
3686 ndlp->nlp_DID, 0, 0);
3687
3688 phba->fc_stat.elsXmitFARPR++;
3689 elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
3690 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3691 if (!elsiocb->ndlp) {
3692 lpfc_els_free_iocb(phba, elsiocb);
3693 return 1;
3694 }
3695
3696 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3697 if (rc == IOCB_ERROR) {
3698 /* The additional lpfc_nlp_put will cause the following
3699 * lpfc_els_free_iocb routine to trigger the release of
3700 * the node.
3701 */
3702 lpfc_els_free_iocb(phba, elsiocb);
3703 lpfc_nlp_put(ndlp);
3704 return 1;
3705 }
3706 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3707 * trigger the release of the node.
3708 */
3709 /* Don't release reference count as RDF is likely outstanding */
3710 return 0;
3711 }
3712
3713 /**
3714 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3715 * @vport: pointer to a host virtual N_Port data structure.
3716 * @retry: retry counter for the command IOCB.
3717 *
3718 * This routine issues an ELS RDF to the Fabric Controller to register
3719 * for diagnostic functions.
3720 *
3721 * Note that the ndlp reference count will be incremented by 1 for holding the
3722 * ndlp and the reference to ndlp will be stored into the ndlp field of
3723 * the IOCB for the completion callback function to the RDF ELS command.
3724 *
3725 * Return code
3726 * 0 - Successfully issued rdf command
3727 * 1 - Failed to issue rdf command
3728 **/
3729 int
3730 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
3731 {
3732 struct lpfc_hba *phba = vport->phba;
3733 struct lpfc_iocbq *elsiocb;
3734 struct lpfc_els_rdf_req *prdf;
3735 struct lpfc_nodelist *ndlp;
3736 uint16_t cmdsize;
3737 int rc;
3738
3739 cmdsize = sizeof(*prdf);
3740
3741 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3742 if (!ndlp) {
3743 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3744 if (!ndlp)
3745 return -ENODEV;
3746 lpfc_enqueue_node(vport, ndlp);
3747 }
3748
3749 /* RDF ELS is not required on an NPIV VN_Port. */
3750 if (vport->port_type == LPFC_NPIV_PORT)
3751 return -EACCES;
3752
3753 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3754 ndlp->nlp_DID, ELS_CMD_RDF);
3755 if (!elsiocb)
3756 return -ENOMEM;
3757
3758 /* Configure the payload for the supported FPIN events. */
3759 prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
3760 memset(prdf, 0, cmdsize);
3761 prdf->rdf.fpin_cmd = ELS_RDF;
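	/* desc_len counts only the FPIN registration descriptor that follows
	 * the fc_els_rdf header.
	 */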
3762 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
3763 sizeof(struct fc_els_rdf));
3764 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
3765 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
3766 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
3767 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
3768 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
3769 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
3770 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
3771 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
3772
3773 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3774 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
3775 ndlp->nlp_DID, phba->cgn_reg_signal,
3776 phba->cgn_reg_fpin);
3777
3778 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
3779 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
3780 elsiocb->ndlp = lpfc_nlp_get(ndlp);
3781 if (!elsiocb->ndlp) {
3782 lpfc_els_free_iocb(phba, elsiocb);
3783 return -EIO;
3784 }
3785
3786 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3787 "Issue RDF: did:x%x refcnt %d",
3788 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3789
3790 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3791 if (rc == IOCB_ERROR) {
3792 lpfc_els_free_iocb(phba, elsiocb);
3793 lpfc_nlp_put(ndlp);
3794 return -EIO;
3795 }
3796 return 0;
3797 }
3798
3799 /**
3800 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
3801 * @vport: pointer to a host virtual N_Port data structure.
3802 * @cmdiocb: pointer to lpfc command iocb data structure.
3803 * @ndlp: pointer to a node-list data structure.
3804 *
3805 * A received RDF implies a possible change to fabric supported diagnostic
3806 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
3807 * RDF request to reregister for supported diagnostic functions.
3808 *
3809 * Return code
3810 * 0 - Success
3811 * -EIO - Failed to process received RDF
3812 **/
3813 static int
3814 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3815 struct lpfc_nodelist *ndlp)
3816 {
3817 /* Send LS_ACC */
3818 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3819 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3820 "1623 Failed to RDF_ACC from x%x for x%x\n",
3821 ndlp->nlp_DID, vport->fc_myDID);
3822 return -EIO;
3823 }
3824
3825 /* Issue new RDF for reregistering */
3826 if (lpfc_issue_els_rdf(vport, 0)) {
3827 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3828 "2623 Failed to re register RDF for x%x\n",
3829 vport->fc_myDID);
3830 return -EIO;
3831 }
3832
3833 return 0;
3834 }
3835
3836 /**
3837 * lpfc_least_capable_settings - helper function for EDC rsp processing
3838 * @phba: pointer to lpfc hba data structure.
3839 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
3840 *
3841 * This helper routine determines the least capable setting for
3842  * congestion signals and signal frequency, including scale, from the
3843 * congestion detection descriptor in the EDC rsp. The routine
3844  * sets @phba values in preparation for a set_features mailbox command.
3845 **/
3846 static void
3847 lpfc_least_capable_settings(struct lpfc_hba *phba,
3848 struct fc_diag_cg_sig_desc *pcgd)
3849 {
3850 u32 rsp_sig_cap = 0, drv_sig_cap = 0;
3851 u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
3852
3853 /* Get rsp signal and frequency capabilities. */
3854 rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
3855 rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
3856 rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
3857
3858 	/* If the Fport does not support signals, set FPIN only */
3859 if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
3860 goto out_no_support;
3861
3862 /* Apply the xmt scale to the xmt cycle to get the correct frequency.
3863 	 * Adapter default is 100 milliseconds. Convert all xmt cycle values
3864 	 * to milliseconds.
3865 */
3866 switch (rsp_sig_freq_scale) {
3867 case EDC_CG_SIGFREQ_SEC:
3868 rsp_sig_freq_cyc *= MSEC_PER_SEC;
3869 break;
3870 case EDC_CG_SIGFREQ_MSEC:
3871 rsp_sig_freq_cyc = 1;
3872 break;
3873 default:
3874 goto out_no_support;
3875 }
3876
3877 /* Convenient shorthand. */
3878 drv_sig_cap = phba->cgn_reg_signal;
3879
3880 /* Choose the least capable frequency. */
3881 if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
3882 phba->cgn_sig_freq = rsp_sig_freq_cyc;
3883
3884 	/* There should be some common signal support. Settle on the least capable
3885 * signal and adjust FPIN values. Initialize defaults to ease the
3886 * decision.
3887 */
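	/* Resulting least-capable combinations handled below:
	 *   rsp WARN_ONLY  with drv WARN_ONLY or WARN_ALARM -> signal WARN_ONLY,  FPIN ALARM only
	 *   rsp WARN_ALARM with drv WARN_ALARM              -> signal WARN_ALARM, no FPIN
	 *   rsp WARN_ALARM with drv WARN_ONLY               -> signal WARN_ONLY,  FPIN ALARM only
	 *   any other combination                           -> no signals, FPIN WARN and ALARM
	 */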
3888 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
3889 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3890 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
3891 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
3892 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
3893 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3894 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3895 }
3896 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3897 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
3898 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
3899 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
3900 }
3901 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
3902 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
3903 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
3904 }
3905 }
3906
3907 /* We are NOT recording signal frequency in congestion info buffer */
3908 return;
3909
3910 out_no_support:
3911 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
3912 phba->cgn_sig_freq = 0;
3913 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
3914 }
3915
3916 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
3917 FC_LS_TLV_DTAG_INIT);
3918
3919 /**
3920 * lpfc_cmpl_els_edc - Completion callback function for EDC
3921 * @phba: pointer to lpfc hba data structure.
3922 * @cmdiocb: pointer to lpfc command iocb data structure.
3923 * @rspiocb: pointer to lpfc response iocb data structure.
3924 *
3925 * This routine is the completion callback function for issuing the Exchange
3926 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
3927 * notify the FPort of its Congestion and Link Fault capabilities. This
3928 * routine parses the FPort's response and decides on the least common
3929 * values applicable to both FPort and NPort for Warnings and Alarms that
3930 * are communicated via hardware signals.
3931 **/
3932 static void
3933 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3934 struct lpfc_iocbq *rspiocb)
3935 {
3936 IOCB_t *irsp_iocb;
3937 struct fc_els_edc_resp *edc_rsp;
3938 struct fc_tlv_desc *tlv;
3939 struct fc_diag_cg_sig_desc *pcgd;
3940 struct fc_diag_lnkflt_desc *plnkflt;
3941 struct lpfc_dmabuf *pcmd, *prsp;
3942 const char *dtag_nm;
3943 u32 *pdata, dtag;
3944 int desc_cnt = 0, bytes_remain;
3945 bool rcv_cap_desc = false;
3946 struct lpfc_nodelist *ndlp;
3947 u32 ulp_status, ulp_word4, tmo, did, iotag;
3948
3949 ndlp = cmdiocb->ndlp;
3950
3951 ulp_status = get_job_ulpstatus(phba, rspiocb);
3952 ulp_word4 = get_job_word4(phba, rspiocb);
3953 did = get_job_els_rsp64_did(phba, rspiocb);
3954
3955 if (phba->sli_rev == LPFC_SLI_REV4) {
3956 tmo = get_wqe_tmo(rspiocb);
3957 iotag = get_wqe_reqtag(rspiocb);
3958 } else {
3959 irsp_iocb = &rspiocb->iocb;
3960 tmo = irsp_iocb->ulpTimeout;
3961 iotag = irsp_iocb->ulpIoTag;
3962 }
3963
3964 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
3965 "EDC cmpl: status:x%x/x%x did:x%x",
3966 ulp_status, ulp_word4, did);
3967
3968 /* ELS cmd tag <ulpIoTag> completes */
3969 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3970 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
3971 iotag, ulp_status, ulp_word4, tmo);
3972
3973 pcmd = cmdiocb->cmd_dmabuf;
3974 if (!pcmd)
3975 goto out;
3976
3977 pdata = (u32 *)pcmd->virt;
3978 if (!pdata)
3979 goto out;
3980
3981 /* Need to clear signal values, send features MB and RDF with FPIN. */
3982 if (ulp_status)
3983 goto out;
3984
3985 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3986 if (!prsp)
3987 goto out;
3988
3989 edc_rsp = prsp->virt;
3990 if (!edc_rsp)
3991 goto out;
3992
3993 /* ELS cmd tag <ulpIoTag> completes */
3994 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
3995 "4676 Fabric EDC Rsp: "
3996 "0x%02x, 0x%08x\n",
3997 edc_rsp->acc_hdr.la_cmd,
3998 be32_to_cpu(edc_rsp->desc_list_len));
3999
4000 /*
4001 * Payload length in bytes is the response descriptor list
4002 * length minus the 12 bytes of Link Service Request
4003 * Information descriptor in the reply.
4004 */
4005 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
4006 sizeof(struct fc_els_lsri_desc);
4007 if (bytes_remain <= 0)
4008 goto out;
4009
4010 tlv = edc_rsp->desc;
4011
4012 /*
4013 * cycle through EDC diagnostic descriptors to find the
4014 * congestion signaling capability descriptor
4015 */
4016 while (bytes_remain) {
4017 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
4018 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4019 "6461 Truncated TLV hdr on "
4020 "Diagnostic descriptor[%d]\n",
4021 desc_cnt);
4022 goto out;
4023 }
4024
4025 dtag = be32_to_cpu(tlv->desc_tag);
4026 switch (dtag) {
4027 case ELS_DTAG_LNK_FAULT_CAP:
4028 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4029 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4030 sizeof(struct fc_diag_lnkflt_desc)) {
4031 lpfc_printf_log(
4032 phba, KERN_WARNING, LOG_CGN_MGMT,
4033 "6462 Truncated Link Fault Diagnostic "
4034 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4035 desc_cnt, bytes_remain,
4036 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4037 					sizeof(struct fc_diag_lnkflt_desc));
4038 goto out;
4039 }
4040 plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
4041 lpfc_printf_log(
4042 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4043 "4617 Link Fault Desc Data: 0x%08x 0x%08x "
4044 "0x%08x 0x%08x 0x%08x\n",
4045 be32_to_cpu(plnkflt->desc_tag),
4046 be32_to_cpu(plnkflt->desc_len),
4047 be32_to_cpu(
4048 plnkflt->degrade_activate_threshold),
4049 be32_to_cpu(
4050 plnkflt->degrade_deactivate_threshold),
4051 be32_to_cpu(plnkflt->fec_degrade_interval));
4052 break;
4053 case ELS_DTAG_CG_SIGNAL_CAP:
4054 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
4055 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
4056 sizeof(struct fc_diag_cg_sig_desc)) {
4057 lpfc_printf_log(
4058 phba, KERN_WARNING, LOG_CGN_MGMT,
4059 "6463 Truncated Cgn Signal Diagnostic "
4060 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
4061 desc_cnt, bytes_remain,
4062 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
4063 sizeof(struct fc_diag_cg_sig_desc));
4064 goto out;
4065 }
4066
4067 pcgd = (struct fc_diag_cg_sig_desc *)tlv;
4068 lpfc_printf_log(
4069 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4070 "4616 CGN Desc Data: 0x%08x 0x%08x "
4071 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
4072 be32_to_cpu(pcgd->desc_tag),
4073 be32_to_cpu(pcgd->desc_len),
4074 be32_to_cpu(pcgd->xmt_signal_capability),
4075 be16_to_cpu(pcgd->xmt_signal_frequency.count),
4076 be16_to_cpu(pcgd->xmt_signal_frequency.units),
4077 be32_to_cpu(pcgd->rcv_signal_capability),
4078 be16_to_cpu(pcgd->rcv_signal_frequency.count),
4079 be16_to_cpu(pcgd->rcv_signal_frequency.units));
4080
4081 /* Compare driver and Fport capabilities and choose
4082 * least common.
4083 */
4084 lpfc_least_capable_settings(phba, pcgd);
4085 rcv_cap_desc = true;
4086 break;
4087 default:
4088 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
4089 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
4090 "4919 unknown Diagnostic "
4091 "Descriptor[%d]: tag x%x (%s)\n",
4092 desc_cnt, dtag, dtag_nm);
4093 }
4094
4095 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
4096 tlv = fc_tlv_next_desc(tlv);
4097 desc_cnt++;
4098 }
4099
4100 out:
4101 if (!rcv_cap_desc) {
4102 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
4103 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4104 phba->cgn_sig_freq = 0;
4105 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
4106 "4202 EDC rsp error - sending RDF "
4107 "for FPIN only.\n");
4108 }
4109
4110 lpfc_config_cgn_signal(phba);
4111
4112 /* Check to see if link went down during discovery */
4113 lpfc_els_chk_latt(phba->pport);
4114 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
4115 "EDC Cmpl: did:x%x refcnt %d",
4116 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4117 lpfc_els_free_iocb(phba, cmdiocb);
4118 lpfc_nlp_put(ndlp);
4119 }
4120
4121 static void
4122 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
4123 {
4124 /* We are assuming cgd was zero'ed before calling this routine */
4125
4126 /* Configure the congestion detection capability */
4127 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
4128
4129 /* Descriptor len doesn't include the tag or len fields. */
4130 cgd->desc_len = cpu_to_be32(
4131 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
4132
4133 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4134 * xmt_signal_frequency.count already set to 0.
4135 * xmt_signal_frequency.units already set to 0.
4136 */
4137
4138 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
4139 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
4140 * rcv_signal_frequency.count already set to 0.
4141 * rcv_signal_frequency.units already set to 0.
4142 */
4143 phba->cgn_sig_freq = 0;
4144 return;
4145 }
4146 switch (phba->cgn_reg_signal) {
4147 case EDC_CG_SIG_WARN_ONLY:
4148 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
4149 break;
4150 case EDC_CG_SIG_WARN_ALARM:
4151 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
4152 break;
4153 default:
4154 /* rcv_signal_capability left 0 thus no support */
4155 break;
4156 }
4157
4158 	/* We start negotiation with lpfc_fabric_cgn_frequency; after
4159 	 * the completion we settle on the higher frequency.
4160 */
4161 cgd->rcv_signal_frequency.count =
4162 cpu_to_be16(lpfc_fabric_cgn_frequency);
4163 cgd->rcv_signal_frequency.units =
4164 cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
4165 }
4166
4167 /**
4168 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
4169 * @vport: pointer to a host virtual N_Port data structure.
4170 * @retry: retry counter for the command iocb.
4171 *
4172 * This routine issues an ELS EDC to the F-Port Controller to communicate
4173 * this N_Port's support of hardware signals in its Congestion
4174 * Capabilities Descriptor.
4175 *
4176 * Note: This routine does not check if one or more signals are
4177 * set in the cgn_reg_signal parameter. The caller makes the
4178 * decision to enforce cgn_reg_signal as nonzero or zero depending
4179 * on the conditions. During Fabric requests, the driver
4180  * requires cgn_reg_signal to be nonzero. But a dynamic request
4181 * to set the congestion mode to OFF from Monitor or Manage
4182 * would correctly issue an EDC with no signals enabled to
4183 * turn off switch functionality and then update the FW.
4184 *
4185 * Return code
4186 * 0 - Successfully issued edc command
4187 * 1 - Failed to issue edc command
4188 **/
4189 int
4190 lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
4191 {
4192 struct lpfc_hba *phba = vport->phba;
4193 struct lpfc_iocbq *elsiocb;
4194 struct lpfc_els_edc_req *edc_req;
4195 struct fc_diag_cg_sig_desc *cgn_desc;
4196 u16 cmdsize;
4197 struct lpfc_nodelist *ndlp;
4198 u8 *pcmd = NULL;
4199 u32 edc_req_size, cgn_desc_size;
4200 int rc;
4201
4202 if (vport->port_type == LPFC_NPIV_PORT)
4203 return -EACCES;
4204
4205 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4206 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
4207 return -ENODEV;
4208
4209 /* If HBA doesn't support signals, drop into RDF */
4210 if (!phba->cgn_init_reg_signal)
4211 goto try_rdf;
4212
4213 edc_req_size = sizeof(struct fc_els_edc);
4214 cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
4215 cmdsize = edc_req_size + cgn_desc_size;
4216 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
4217 ndlp->nlp_DID, ELS_CMD_EDC);
4218 if (!elsiocb)
4219 goto try_rdf;
4220
4221 /* Configure the payload for the supported Diagnostics capabilities. */
4222 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
4223 memset(pcmd, 0, cmdsize);
4224 edc_req = (struct lpfc_els_edc_req *)pcmd;
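	/* The request carries a single congestion signaling capability
	 * descriptor, so the EDC descriptor list length is just its size.
	 */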
4225 edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
4226 edc_req->edc.edc_cmd = ELS_EDC;
4227
4228 cgn_desc = &edc_req->cgn_desc;
4229
4230 lpfc_format_edc_cgn_desc(phba, cgn_desc);
4231
4232 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
4233
4234 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
4235 "4623 Xmit EDC to remote "
4236 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
4237 ndlp->nlp_DID, phba->cgn_reg_signal,
4238 phba->cgn_reg_fpin);
4239
4240 elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
4241 elsiocb->ndlp = lpfc_nlp_get(ndlp);
4242 if (!elsiocb->ndlp) {
4243 lpfc_els_free_iocb(phba, elsiocb);
4244 return -EIO;
4245 }
4246
4247 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4248 "Issue EDC: did:x%x refcnt %d",
4249 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
4250 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4251 if (rc == IOCB_ERROR) {
4252 /* The additional lpfc_nlp_put will cause the following
4253 		 * lpfc_els_free_iocb routine to trigger the release of
4254 * the node.
4255 */
4256 lpfc_els_free_iocb(phba, elsiocb);
4257 lpfc_nlp_put(ndlp);
4258 goto try_rdf;
4259 }
4260 return 0;
4261 try_rdf:
4262 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
4263 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
4264 rc = lpfc_issue_els_rdf(vport, 0);
4265 return rc;
4266 }
4267
4268 /**
4269 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
4270 * @vport: pointer to a host virtual N_Port data structure.
4271 * @nlp: pointer to a node-list data structure.
4272 *
4273 * This routine cancels the timer with a delayed IOCB-command retry for
4274  * a @vport's @ndlp. It stops the timer for the delayed function retry and
4275  * removes the ELS retry event if present. In addition, if the
4276 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
4277 * commands are sent for the @vport's nodes that require issuing discovery
4278 * ADISC.
4279 **/
4280 void
4281 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
4282 {
4283 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4284 struct lpfc_work_evt *evtp;
4285
4286 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
4287 return;
4288 spin_lock_irq(&nlp->lock);
4289 nlp->nlp_flag &= ~NLP_DELAY_TMO;
4290 spin_unlock_irq(&nlp->lock);
4291 del_timer_sync(&nlp->nlp_delayfunc);
4292 nlp->nlp_last_elscmd = 0;
4293 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
4294 list_del_init(&nlp->els_retry_evt.evt_listp);
4295 /* Decrement nlp reference count held for the delayed retry */
4296 evtp = &nlp->els_retry_evt;
4297 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
4298 }
4299 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
4300 spin_lock_irq(&nlp->lock);
4301 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4302 spin_unlock_irq(&nlp->lock);
4303 if (vport->num_disc_nodes) {
4304 if (vport->port_state < LPFC_VPORT_READY) {
4305 /* Check if there are more ADISCs to be sent */
4306 lpfc_more_adisc(vport);
4307 } else {
4308 /* Check if there are more PLOGIs to be sent */
4309 lpfc_more_plogi(vport);
4310 if (vport->num_disc_nodes == 0) {
4311 spin_lock_irq(shost->host_lock);
4312 vport->fc_flag &= ~FC_NDISC_ACTIVE;
4313 spin_unlock_irq(shost->host_lock);
4314 lpfc_can_disctmo(vport);
4315 lpfc_end_rscn(vport);
4316 }
4317 }
4318 }
4319 }
4320 return;
4321 }
4322
4323 /**
4324 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
4325 * @t: pointer to the timer function associated data (ndlp).
4326 *
4327 * This routine is invoked by the ndlp delayed-function timer to check
4328 * whether there is any pending ELS retry event(s) with the node. If not, it
4329 * simply returns. Otherwise, if there is at least one ELS delayed event, it
4330 * adds the delayed events to the HBA work list and invokes the
4331 * lpfc_worker_wake_up() routine to wake up worker thread to process the
4332 * event. Note that lpfc_nlp_get() is called before posting the event to
4333 * the work list to hold reference count of ndlp so that it guarantees the
4334 * reference to ndlp will still be available when the worker thread gets
4335 * to the event associated with the ndlp.
4336 **/
4337 void
4338 lpfc_els_retry_delay(struct timer_list *t)
4339 {
4340 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
4341 struct lpfc_vport *vport = ndlp->vport;
4342 struct lpfc_hba *phba = vport->phba;
4343 unsigned long flags;
4344 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
4345
4346 spin_lock_irqsave(&phba->hbalock, flags);
4347 if (!list_empty(&evtp->evt_listp)) {
4348 spin_unlock_irqrestore(&phba->hbalock, flags);
4349 return;
4350 }
4351
4352 /* We need to hold the node by incrementing the reference
4353 * count until the queued work is done
4354 */
4355 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
4356 if (evtp->evt_arg1) {
4357 evtp->evt = LPFC_EVT_ELS_RETRY;
4358 list_add_tail(&evtp->evt_listp, &phba->work_list);
4359 lpfc_worker_wake_up(phba);
4360 }
4361 spin_unlock_irqrestore(&phba->hbalock, flags);
4362 return;
4363 }
4364
4365 /**
4366 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
4367 * @ndlp: pointer to a node-list data structure.
4368 *
4369 * This routine is the worker-thread handler for processing the @ndlp delayed
4370 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
4371 * the last ELS command from the associated ndlp and invokes the proper ELS
4372 * function according to the delayed ELS command to retry the command.
4373 **/
4374 void
4375 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
4376 {
4377 struct lpfc_vport *vport = ndlp->vport;
4378 uint32_t cmd, retry;
4379
4380 spin_lock_irq(&ndlp->lock);
4381 cmd = ndlp->nlp_last_elscmd;
4382 ndlp->nlp_last_elscmd = 0;
4383
4384 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
4385 spin_unlock_irq(&ndlp->lock);
4386 return;
4387 }
4388
4389 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4390 spin_unlock_irq(&ndlp->lock);
4391 /*
4392 	 * If a discovery event re-added nlp_delayfunc after the timer
4393 * firing and before processing the timer, cancel the
4394 * nlp_delayfunc.
4395 */
4396 del_timer_sync(&ndlp->nlp_delayfunc);
4397 retry = ndlp->nlp_retry;
4398 ndlp->nlp_retry = 0;
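	/* Reissue the saved ELS command with the preserved retry count; the
	 * node state transitions mirror the normal issue paths below.
	 */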
4399
4400 switch (cmd) {
4401 case ELS_CMD_FLOGI:
4402 lpfc_issue_els_flogi(vport, ndlp, retry);
4403 break;
4404 case ELS_CMD_PLOGI:
4405 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
4406 ndlp->nlp_prev_state = ndlp->nlp_state;
4407 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4408 }
4409 break;
4410 case ELS_CMD_ADISC:
4411 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
4412 ndlp->nlp_prev_state = ndlp->nlp_state;
4413 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4414 }
4415 break;
4416 case ELS_CMD_PRLI:
4417 case ELS_CMD_NVMEPRLI:
4418 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
4419 ndlp->nlp_prev_state = ndlp->nlp_state;
4420 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4421 }
4422 break;
4423 case ELS_CMD_LOGO:
4424 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
4425 ndlp->nlp_prev_state = ndlp->nlp_state;
4426 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4427 }
4428 break;
4429 case ELS_CMD_FDISC:
4430 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
4431 lpfc_issue_els_fdisc(vport, ndlp, retry);
4432 break;
4433 }
4434 return;
4435 }
4436
4437 /**
4438 * lpfc_link_reset - Issue link reset
4439 * @vport: pointer to a virtual N_Port data structure.
4440 *
4441 * This routine performs link reset by sending INIT_LINK mailbox command.
4442 * For SLI-3 adapter, link attention interrupt is enabled before issuing
4443 * INIT_LINK mailbox command.
4444 *
4445 * Return code
4446 * 0 - Link reset initiated successfully
4447 * 1 - Failed to initiate link reset
4448 **/
4449 int
4450 lpfc_link_reset(struct lpfc_vport *vport)
4451 {
4452 struct lpfc_hba *phba = vport->phba;
4453 LPFC_MBOXQ_t *mbox;
4454 uint32_t control;
4455 int rc;
4456
4457 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4458 "2851 Attempt link reset\n");
4459 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4460 if (!mbox) {
4461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4462 "2852 Failed to allocate mbox memory");
4463 return 1;
4464 }
4465
4466 /* Enable Link attention interrupts */
4467 if (phba->sli_rev <= LPFC_SLI_REV3) {
4468 spin_lock_irq(&phba->hbalock);
4469 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4470 control = readl(phba->HCregaddr);
4471 control |= HC_LAINT_ENA;
4472 writel(control, phba->HCregaddr);
4473 readl(phba->HCregaddr); /* flush */
4474 spin_unlock_irq(&phba->hbalock);
4475 }
4476
4477 lpfc_init_link(phba, mbox, phba->cfg_topology,
4478 phba->cfg_link_speed);
4479 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4480 mbox->vport = vport;
4481 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4482 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4484 "2853 Failed to issue INIT_LINK "
4485 "mbox command, rc:x%x\n", rc);
4486 mempool_free(mbox, phba->mbox_mem_pool);
4487 return 1;
4488 }
4489
4490 return 0;
4491 }
4492
4493 /**
4494 * lpfc_els_retry - Make retry decision on an els command iocb
4495 * @phba: pointer to lpfc hba data structure.
4496 * @cmdiocb: pointer to lpfc command iocb data structure.
4497 * @rspiocb: pointer to lpfc response iocb data structure.
4498 *
4499 * This routine makes a retry decision on an ELS command IOCB, which has
4500 * failed. The following ELS IOCBs use this function for retrying the command
4501 * when previously issued command responsed with error status: FLOGI, PLOGI,
4502 * PRLI, ADISC and FDISC. Based on the ELS command type and the
4503 * returned error status, it makes the decision whether a retry shall be
4504 * issued for the command, and whether a retry shall be made immediately or
4505 * delayed. In the former case, the corresponding ELS command issuing-function
4506  * is called to retry the command. In the latter case, the ELS command shall
4507  * be posted to the ndlp delayed event and the delayed function timer set on the
4508  * ndlp for the delayed command issuing.
4509 *
4510 * Return code
4511 * 0 - No retry of els command is made
4512 * 1 - Immediate or delayed retry of els command is made
4513 **/
4514 static int
4515 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4516 struct lpfc_iocbq *rspiocb)
4517 {
4518 struct lpfc_vport *vport = cmdiocb->vport;
4519 union lpfc_wqe128 *irsp = &rspiocb->wqe;
4520 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
4521 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
4522 uint32_t *elscmd;
4523 struct ls_rjt stat;
4524 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4525 int logerr = 0;
4526 uint32_t cmd = 0;
4527 uint32_t did;
4528 int link_reset = 0, rc;
4529 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
4530 u32 ulp_word4 = get_job_word4(phba, rspiocb);
4531
4532
4533 /* Note: cmd_dmabuf may be 0 for internal driver abort
4534 	 * of a delayed ELS command.
4535 */
4536
4537 if (pcmd && pcmd->virt) {
4538 elscmd = (uint32_t *) (pcmd->virt);
4539 cmd = *elscmd++;
4540 }
4541
4542 if (ndlp)
4543 did = ndlp->nlp_DID;
4544 else {
4545 /* We should only hit this case for retrying PLOGI */
4546 did = get_job_els_rsp64_did(phba, rspiocb);
4547 ndlp = lpfc_findnode_did(vport, did);
4548 if (!ndlp && (cmd != ELS_CMD_PLOGI))
4549 return 0;
4550 }
4551
4552 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4553 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
4554 *(((uint32_t *)irsp) + 7), ulp_word4, did);
4555
4556 switch (ulp_status) {
4557 case IOSTAT_FCP_RSP_ERROR:
4558 break;
4559 case IOSTAT_REMOTE_STOP:
4560 if (phba->sli_rev == LPFC_SLI_REV4) {
4561 			/* This IO was aborted by the target; we don't
4562 			 * know the rxid and, because we did not send the
4563 			 * ABTS, we cannot generate an RRQ.
4564 */
4565 lpfc_set_rrq_active(phba, ndlp,
4566 cmdiocb->sli4_lxritag, 0, 0);
4567 }
4568 break;
4569 case IOSTAT_LOCAL_REJECT:
4570 switch ((ulp_word4 & IOERR_PARAM_MASK)) {
4571 case IOERR_LOOP_OPEN_FAILURE:
4572 if (cmd == ELS_CMD_FLOGI) {
4573 if (PCI_DEVICE_ID_HORNET ==
4574 phba->pcidev->device) {
4575 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
4576 phba->pport->fc_myDID = 0;
4577 phba->alpa_map[0] = 0;
4578 phba->alpa_map[1] = 0;
4579 }
4580 }
4581 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
4582 delay = 1000;
4583 retry = 1;
4584 break;
4585
4586 case IOERR_ILLEGAL_COMMAND:
4587 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4588 "0124 Retry illegal cmd x%x "
4589 "retry:x%x delay:x%x\n",
4590 cmd, cmdiocb->retry, delay);
4591 retry = 1;
4592 /* All command's retry policy */
4593 maxretry = 8;
4594 if (cmdiocb->retry > 2)
4595 delay = 1000;
4596 break;
4597
4598 case IOERR_NO_RESOURCES:
4599 logerr = 1; /* HBA out of resources */
4600 retry = 1;
4601 if (cmdiocb->retry > 100)
4602 delay = 100;
4603 maxretry = 250;
4604 break;
4605
4606 case IOERR_ILLEGAL_FRAME:
4607 delay = 100;
4608 retry = 1;
4609 break;
4610
4611 case IOERR_INVALID_RPI:
4612 if (cmd == ELS_CMD_PLOGI &&
4613 did == NameServer_DID) {
4614 /* Continue forever if plogi to */
4615 /* the nameserver fails */
4616 maxretry = 0;
4617 delay = 100;
4618 }
4619 retry = 1;
4620 break;
4621
4622 case IOERR_SEQUENCE_TIMEOUT:
4623 if (cmd == ELS_CMD_PLOGI &&
4624 did == NameServer_DID &&
4625 (cmdiocb->retry + 1) == maxretry) {
4626 /* Reset the Link */
4627 link_reset = 1;
4628 break;
4629 }
4630 retry = 1;
4631 delay = 100;
4632 break;
4633 case IOERR_SLI_ABORTED:
4634 /* Retry ELS PLOGI command?
4635 * Possibly the rport just wasn't ready.
4636 */
4637 if (cmd == ELS_CMD_PLOGI) {
4638 /* No retry if state change */
4639 if (ndlp &&
4640 ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
4641 goto out_retry;
4642 retry = 1;
4643 maxretry = 2;
4644 }
4645 break;
4646 }
4647 break;
4648
4649 case IOSTAT_NPORT_RJT:
4650 case IOSTAT_FABRIC_RJT:
4651 if (ulp_word4 & RJT_UNAVAIL_TEMP) {
4652 retry = 1;
4653 break;
4654 }
4655 break;
4656
4657 case IOSTAT_NPORT_BSY:
4658 case IOSTAT_FABRIC_BSY:
4659 logerr = 1; /* Fabric / Remote NPort out of resources */
4660 retry = 1;
4661 break;
4662
4663 case IOSTAT_LS_RJT:
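		/* For LS_RJT, ulp_word4 holds the reject reason and explanation
		 * codes; overlay it on the ls_rjt structure for decoding below.
		 */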
4664 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
4665 		/* Added for Vendor specific support
4666 * Just keep retrying for these Rsn / Exp codes
4667 */
4668 if ((vport->fc_flag & FC_PT2PT) &&
4669 cmd == ELS_CMD_NVMEPRLI) {
4670 switch (stat.un.b.lsRjtRsnCode) {
4671 case LSRJT_UNABLE_TPC:
4672 case LSRJT_INVALID_CMD:
4673 case LSRJT_LOGICAL_ERR:
4674 case LSRJT_CMD_UNSUPPORTED:
4675 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
4676 "0168 NVME PRLI LS_RJT "
4677 "reason %x port doesn't "
4678 "support NVME, disabling NVME\n",
4679 stat.un.b.lsRjtRsnCode);
4680 retry = 0;
4681 vport->fc_flag |= FC_PT2PT_NO_NVME;
4682 goto out_retry;
4683 }
4684 }
4685 switch (stat.un.b.lsRjtRsnCode) {
4686 case LSRJT_UNABLE_TPC:
4687 /* The driver has a VALID PLOGI but the rport has
4688 * rejected the PRLI - can't do it now. Delay
4689 * for 1 second and try again.
4690 *
4691 * However, if explanation is REQ_UNSUPPORTED there's
4692 * no point to retry PRLI.
4693 */
4694 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
4695 stat.un.b.lsRjtRsnCodeExp !=
4696 LSEXP_REQ_UNSUPPORTED) {
4697 delay = 1000;
4698 maxretry = lpfc_max_els_tries + 1;
4699 retry = 1;
4700 break;
4701 }
4702
4703 /* Legacy bug fix code for targets with PLOGI delays. */
4704 if (stat.un.b.lsRjtRsnCodeExp ==
4705 LSEXP_CMD_IN_PROGRESS) {
4706 if (cmd == ELS_CMD_PLOGI) {
4707 delay = 1000;
4708 maxretry = 48;
4709 }
4710 retry = 1;
4711 break;
4712 }
4713 if (stat.un.b.lsRjtRsnCodeExp ==
4714 LSEXP_CANT_GIVE_DATA) {
4715 if (cmd == ELS_CMD_PLOGI) {
4716 delay = 1000;
4717 maxretry = 48;
4718 }
4719 retry = 1;
4720 break;
4721 }
4722 if (cmd == ELS_CMD_PLOGI) {
4723 delay = 1000;
4724 maxretry = lpfc_max_els_tries + 1;
4725 retry = 1;
4726 break;
4727 }
4728 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4729 (cmd == ELS_CMD_FDISC) &&
4730 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
4731 lpfc_printf_vlog(vport, KERN_ERR,
4732 LOG_TRACE_EVENT,
4733 "0125 FDISC Failed (x%x). "
4734 "Fabric out of resources\n",
4735 stat.un.lsRjtError);
4736 lpfc_vport_set_state(vport,
4737 FC_VPORT_NO_FABRIC_RSCS);
4738 }
4739 break;
4740
4741 case LSRJT_LOGICAL_BSY:
4742 if ((cmd == ELS_CMD_PLOGI) ||
4743 (cmd == ELS_CMD_PRLI) ||
4744 (cmd == ELS_CMD_NVMEPRLI)) {
4745 delay = 1000;
4746 maxretry = 48;
4747 } else if (cmd == ELS_CMD_FDISC) {
4748 /* FDISC retry policy */
4749 maxretry = 48;
4750 if (cmdiocb->retry >= 32)
4751 delay = 1000;
4752 }
4753 retry = 1;
4754 break;
4755
4756 case LSRJT_LOGICAL_ERR:
4757 /* There are some cases where switches return this
4758 * error when they are not ready and should be returning
4759 * Logical Busy. We should delay every time.
4760 */
4761 if (cmd == ELS_CMD_FDISC &&
4762 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
4763 maxretry = 3;
4764 delay = 1000;
4765 retry = 1;
4766 } else if (cmd == ELS_CMD_FLOGI &&
4767 stat.un.b.lsRjtRsnCodeExp ==
4768 LSEXP_NOTHING_MORE) {
4769 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
4770 retry = 1;
4771 lpfc_printf_vlog(vport, KERN_ERR,
4772 LOG_TRACE_EVENT,
4773 "0820 FLOGI Failed (x%x). "
4774 "BBCredit Not Supported\n",
4775 stat.un.lsRjtError);
4776 }
4777 break;
4778
4779 case LSRJT_PROTOCOL_ERR:
4780 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4781 (cmd == ELS_CMD_FDISC) &&
4782 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
4783 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
4784 ) {
4785 lpfc_printf_vlog(vport, KERN_ERR,
4786 LOG_TRACE_EVENT,
4787 "0122 FDISC Failed (x%x). "
4788 "Fabric Detected Bad WWN\n",
4789 stat.un.lsRjtError);
4790 lpfc_vport_set_state(vport,
4791 FC_VPORT_FABRIC_REJ_WWN);
4792 }
4793 break;
4794 case LSRJT_VENDOR_UNIQUE:
4795 if ((stat.un.b.vendorUnique == 0x45) &&
4796 (cmd == ELS_CMD_FLOGI)) {
4797 goto out_retry;
4798 }
4799 break;
4800 case LSRJT_CMD_UNSUPPORTED:
4801 /* lpfc nvmet returns this type of LS_RJT when it
4802 * receives an FCP PRLI because lpfc nvmet only
4803 			 * supports NVME. The ELS request is terminated for FCP4
4804 * on this rport.
4805 */
4806 if (stat.un.b.lsRjtRsnCodeExp ==
4807 LSEXP_REQ_UNSUPPORTED) {
4808 if (cmd == ELS_CMD_PRLI) {
4809 spin_lock_irq(&ndlp->lock);
4810 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
4811 spin_unlock_irq(&ndlp->lock);
4812 retry = 0;
4813 goto out_retry;
4814 }
4815 }
4816 break;
4817 }
4818 break;
4819
4820 case IOSTAT_INTERMED_RSP:
4821 case IOSTAT_BA_RJT:
4822 break;
4823
4824 default:
4825 break;
4826 }
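	/* The per-status policy above selected retry, maxretry, delay and
	 * link_reset; the generic handling below applies command-specific
	 * overrides and enforces the retry ceiling.
	 */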
4827
4828 if (link_reset) {
4829 rc = lpfc_link_reset(vport);
4830 if (rc) {
4831 /* Do not give up. Retry PLOGI one more time and attempt
4832 * link reset if PLOGI fails again.
4833 */
4834 retry = 1;
4835 delay = 100;
4836 goto out_retry;
4837 }
4838 return 1;
4839 }
4840
4841 if (did == FDMI_DID)
4842 retry = 1;
4843
4844 if ((cmd == ELS_CMD_FLOGI) &&
4845 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
4846 !lpfc_error_lost_link(ulp_status, ulp_word4)) {
4847 /* FLOGI retry policy */
4848 retry = 1;
4849 /* retry FLOGI forever */
4850 if (phba->link_flag != LS_LOOPBACK_MODE)
4851 maxretry = 0;
4852 else
4853 maxretry = 2;
4854
4855 if (cmdiocb->retry >= 100)
4856 delay = 5000;
4857 else if (cmdiocb->retry >= 32)
4858 delay = 1000;
4859 } else if ((cmd == ELS_CMD_FDISC) &&
4860 !lpfc_error_lost_link(ulp_status, ulp_word4)) {
4861 /* retry FDISCs every second up to devloss */
4862 retry = 1;
4863 maxretry = vport->cfg_devloss_tmo;
4864 delay = 1000;
4865 }
4866
4867 cmdiocb->retry++;
4868 if (maxretry && (cmdiocb->retry >= maxretry)) {
4869 phba->fc_stat.elsRetryExceeded++;
4870 retry = 0;
4871 }
4872
4873 if ((vport->load_flag & FC_UNLOADING) != 0)
4874 retry = 0;
4875
4876 out_retry:
4877 if (retry) {
4878 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
4879 /* Stop retrying PLOGI and FDISC if in FCF discovery */
4880 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4881 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4882 "2849 Stop retry ELS command "
4883 "x%x to remote NPORT x%x, "
4884 "Data: x%x x%x\n", cmd, did,
4885 cmdiocb->retry, delay);
4886 return 0;
4887 }
4888 }
4889
4890 /* Retry ELS command <elsCmd> to remote NPORT <did> */
4891 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4892 "0107 Retry ELS command x%x to remote "
4893 "NPORT x%x Data: x%x x%x\n",
4894 cmd, did, cmdiocb->retry, delay);
4895
4896 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
4897 ((ulp_status != IOSTAT_LOCAL_REJECT) ||
4898 ((ulp_word4 & IOERR_PARAM_MASK) !=
4899 IOERR_NO_RESOURCES))) {
4900 /* Don't reset timer for no resources */
4901
4902 /* If discovery / RSCN timer is running, reset it */
4903 if (timer_pending(&vport->fc_disctmo) ||
4904 (vport->fc_flag & FC_RSCN_MODE))
4905 lpfc_set_disctmo(vport);
4906 }
4907
4908 phba->fc_stat.elsXmitRetry++;
4909 if (ndlp && delay) {
4910 phba->fc_stat.elsDelayRetry++;
4911 ndlp->nlp_retry = cmdiocb->retry;
4912
4913 /* delay is specified in milliseconds */
4914 mod_timer(&ndlp->nlp_delayfunc,
4915 jiffies + msecs_to_jiffies(delay));
4916 spin_lock_irq(&ndlp->lock);
4917 ndlp->nlp_flag |= NLP_DELAY_TMO;
4918 spin_unlock_irq(&ndlp->lock);
4919
4920 ndlp->nlp_prev_state = ndlp->nlp_state;
4921 if ((cmd == ELS_CMD_PRLI) ||
4922 (cmd == ELS_CMD_NVMEPRLI))
4923 lpfc_nlp_set_state(vport, ndlp,
4924 NLP_STE_PRLI_ISSUE);
4925 else if (cmd != ELS_CMD_ADISC)
4926 lpfc_nlp_set_state(vport, ndlp,
4927 NLP_STE_NPR_NODE);
4928 ndlp->nlp_last_elscmd = cmd;
4929
4930 return 1;
4931 }
4932 switch (cmd) {
4933 case ELS_CMD_FLOGI:
4934 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
4935 return 1;
4936 case ELS_CMD_FDISC:
4937 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
4938 return 1;
4939 case ELS_CMD_PLOGI:
4940 if (ndlp) {
4941 ndlp->nlp_prev_state = ndlp->nlp_state;
4942 lpfc_nlp_set_state(vport, ndlp,
4943 NLP_STE_PLOGI_ISSUE);
4944 }
4945 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
4946 return 1;
4947 case ELS_CMD_ADISC:
4948 ndlp->nlp_prev_state = ndlp->nlp_state;
4949 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4950 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
4951 return 1;
4952 case ELS_CMD_PRLI:
4953 case ELS_CMD_NVMEPRLI:
4954 ndlp->nlp_prev_state = ndlp->nlp_state;
4955 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4956 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
4957 return 1;
4958 case ELS_CMD_LOGO:
4959 ndlp->nlp_prev_state = ndlp->nlp_state;
4960 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4961 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
4962 return 1;
4963 }
4964 }
4965 /* No retry ELS command <elsCmd> to remote NPORT <did> */
4966 if (logerr) {
4967 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4968 "0137 No retry ELS command x%x to remote "
4969 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
4970 cmd, did, ulp_status,
4971 ulp_word4);
4972 }
4973 else {
4974 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4975 "0108 No retry ELS command x%x to remote "
4976 "NPORT x%x Retried:%d Error:x%x/%x\n",
4977 cmd, did, cmdiocb->retry, ulp_status,
4978 ulp_word4);
4979 }
4980 return 0;
4981 }
4982
4983 /**
4984 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
4985 * @phba: pointer to lpfc hba data structure.
4986 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
4987 *
4988 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
4989 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
4990 * checks to see whether there is a lpfc DMA buffer associated with the
4991 * response of the command IOCB. If so, it will be released before releasing
4992 * the lpfc DMA buffer associated with the IOCB itself.
4993 *
4994 * Return code
4995 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
4996 **/
4997 static int
4998 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
4999 {
5000 struct lpfc_dmabuf *buf_ptr;
5001
5002 /* Free the response before processing the command. */
5003 if (!list_empty(&buf_ptr1->list)) {
5004 list_remove_head(&buf_ptr1->list, buf_ptr,
5005 struct lpfc_dmabuf,
5006 list);
5007 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
5008 kfree(buf_ptr);
5009 }
5010 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
5011 kfree(buf_ptr1);
5012 return 0;
5013 }
5014
5015 /**
5016 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
5017 * @phba: pointer to lpfc hba data structure.
5018 * @buf_ptr: pointer to the lpfc dma buffer data structure.
5019 *
5020 * This routine releases the lpfc Direct Memory Access (DMA) buffer
5021 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
5022 * pool.
5023 *
5024 * Return code
5025 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
5026 **/
5027 static int
5028 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
5029 {
5030 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
5031 kfree(buf_ptr);
5032 return 0;
5033 }
5034
5035 /**
5036 * lpfc_els_free_iocb - Free a command iocb and its associated resources
5037 * @phba: pointer to lpfc hba data structure.
5038 * @elsiocb: pointer to lpfc els command iocb data structure.
5039 *
5040 * This routine frees a command IOCB and its associated resources. The
5041 * command IOCB data structure contains references to the various associated
5042 * resources; these fields must be set to NULL if the associated reference is
5043 * not present:
5044 * cmd_dmabuf - reference to cmd.
5045 * cmd_dmabuf->next - reference to rsp
5046 * rsp_dmabuf - unused
5047 * bpl_dmabuf - reference to bpl
5048 *
5049 * It first properly decrements the reference count held on ndlp for the
5050 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
5051 * set, it invokes the lpfc_els_free_data() routine to release the Direct
5052 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
5053 * adds the DMA buffer to the @phba data structure for delayed release.
5054 * If reference to the Buffer Pointer List (BPL) is present, the
5055 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
5056 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
5057 * invoked to release the IOCB data structure back to @phba IOCBQ list.
5058 *
5059 * Return code
5060 * 0 - Success (currently, always return 0)
5061 **/
5062 int
5063 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
5064 {
5065 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
5066
5067 /* The I/O iocb is complete. Clear the node and first dmabuf */
5068 elsiocb->ndlp = NULL;
5069
5070 /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
5071 if (elsiocb->cmd_dmabuf) {
5072 if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
5073 /* Firmware could still be in the process of DMAing the
5074 * payload, so don't free the data buffer until after
5075 * a heartbeat.
5076 */
5077 elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
5078 buf_ptr = elsiocb->cmd_dmabuf;
5079 elsiocb->cmd_dmabuf = NULL;
5080 if (buf_ptr) {
5081 buf_ptr1 = NULL;
5082 spin_lock_irq(&phba->hbalock);
5083 if (!list_empty(&buf_ptr->list)) {
5084 list_remove_head(&buf_ptr->list,
5085 buf_ptr1, struct lpfc_dmabuf,
5086 list);
5087 INIT_LIST_HEAD(&buf_ptr1->list);
5088 list_add_tail(&buf_ptr1->list,
5089 &phba->elsbuf);
5090 phba->elsbuf_cnt++;
5091 }
5092 INIT_LIST_HEAD(&buf_ptr->list);
5093 list_add_tail(&buf_ptr->list, &phba->elsbuf);
5094 phba->elsbuf_cnt++;
5095 spin_unlock_irq(&phba->hbalock);
5096 }
5097 } else {
5098 buf_ptr1 = elsiocb->cmd_dmabuf;
5099 lpfc_els_free_data(phba, buf_ptr1);
5100 elsiocb->cmd_dmabuf = NULL;
5101 }
5102 }
5103
5104 if (elsiocb->bpl_dmabuf) {
5105 buf_ptr = elsiocb->bpl_dmabuf;
5106 lpfc_els_free_bpl(phba, buf_ptr);
5107 elsiocb->bpl_dmabuf = NULL;
5108 }
5109 lpfc_sli_release_iocbq(phba, elsiocb);
5110 return 0;
5111 }
5112
5113 /**
5114 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
5115 * @phba: pointer to lpfc hba data structure.
5116 * @cmdiocb: pointer to lpfc command iocb data structure.
5117 * @rspiocb: pointer to lpfc response iocb data structure.
5118 *
5119 * This routine is the completion callback function to the Logout (LOGO)
5120 * Accept (ACC) Response ELS command. This routine is invoked to indicate
5121 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
5122 * release the ndlp if it has the last reference remaining (reference count
5123 * is 1). If that succeeds (meaning the ndlp was released), it sets the iocb
5124 * ndlp field to NULL to inform the following lpfc_els_free_iocb() routine that no
5125 * ndlp reference count needs to be decremented. Otherwise, the ndlp
5126 * reference use-count shall be decremented by the lpfc_els_free_iocb()
5127 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
5128 * IOCB data structure.
5129 **/
5130 static void
5131 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5132 struct lpfc_iocbq *rspiocb)
5133 {
5134 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5135 struct lpfc_vport *vport = cmdiocb->vport;
5136 u32 ulp_status, ulp_word4;
5137
5138 ulp_status = get_job_ulpstatus(phba, rspiocb);
5139 ulp_word4 = get_job_word4(phba, rspiocb);
5140
5141 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5142 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
5143 ulp_status, ulp_word4, ndlp->nlp_DID);
5144 /* ACC to LOGO completes to NPort <nlp_DID> */
5145 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5146 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
5147 "Data: x%x x%x x%x\n",
5148 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
5149 ndlp->nlp_state, ndlp->nlp_rpi);
5150
5151 /* This clause allows the LOGO ACC to complete and free resources
5152 * for the Fabric Domain Controller. It deliberately skips
5153 * the unreg_rpi and the rpi release because some fabrics send RDP
5154 * requests after logging out from the initiator.
5155 */
5156 if (ndlp->nlp_type & NLP_FABRIC &&
5157 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
5158 goto out;
5159
5160 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
5161 /* If PLOGI is being retried, PLOGI completion will cleanup the
5162 * node. The NLP_NPR_2B_DISC flag needs to be retained to make
5163 * progress on nodes discovered from last RSCN.
5164 */
5165 if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
5166 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
5167 goto out;
5168
5169 /* NPort Recovery mode or node is just allocated */
5170 if (!lpfc_nlp_not_used(ndlp)) {
5171 /* A LOGO is completing and the node is in NPR state.
5172 * Just unregister the RPI because the node is still
5173 * required.
5174 */
5175 lpfc_unreg_rpi(vport, ndlp);
5176 } else {
5177 /* Indicate the node has already been released; do
5178 * not reference it from within lpfc_els_free_iocb.
5179 */
5180 cmdiocb->ndlp = NULL;
5181 }
5182 }
5183 out:
5184 /*
5185 * The driver received a LOGO from the rport and has ACK'd it.
5186 * At this point, the driver is done so release the IOCB
5187 */
5188 lpfc_els_free_iocb(phba, cmdiocb);
5189 lpfc_nlp_put(ndlp);
5190 }
5191
5192 /**
5193 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
5194 * @phba: pointer to lpfc hba data structure.
5195 * @pmb: pointer to the driver internal queue element for mailbox command.
5196 *
5197 * This routine is the completion callback function for unregister default
5198 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
5199 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
5200 * decrements the ndlp reference count held for this completion callback
5201 * function. After that, it invokes the lpfc_nlp_not_used() to check
5202 * whether there is only one reference left on the ndlp. If so, it will
5203 * perform one more decrement and trigger the release of the ndlp.
5204 **/
5205 void
5206 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5207 {
5208 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
5209 u32 mbx_flag = pmb->mbox_flag;
5210 u32 mbx_cmd = pmb->u.mb.mbxCommand;
5211
5212 if (ndlp) {
5213 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5214 "0006 rpi x%x DID:%x flg:%x %d x%px "
5215 "mbx_cmd x%x mbx_flag x%x x%px\n",
5216 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5217 kref_read(&ndlp->kref), ndlp, mbx_cmd,
5218 mbx_flag, pmb);
5219
5220 /* This ends the default/temporary RPI cleanup logic for this
5221 * ndlp, and the node and rpi need to be released. Free the rpi
5222 * first on an UNREG_LOGIN and then release the final
5223 * references.
5224 */
5225 spin_lock_irq(&ndlp->lock);
5226 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5227 if (mbx_cmd == MBX_UNREG_LOGIN)
5228 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5229 spin_unlock_irq(&ndlp->lock);
5230 lpfc_nlp_put(ndlp);
5231 lpfc_drop_node(ndlp->vport, ndlp);
5232 }
5233
5234 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5235 }
5236
5237 /**
5238 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
5239 * @phba: pointer to lpfc hba data structure.
5240 * @cmdiocb: pointer to lpfc command iocb data structure.
5241 * @rspiocb: pointer to lpfc response iocb data structure.
5242 *
5243 * This routine is the completion callback function for ELS Response IOCB
5244 * command. In the normal case, this callback function just sets the
5245 * nlp_flag bitmap in the ndlp data structure appropriately. If the mbox
5246 * command reference field in the command IOCB is not NULL, the referenced
5247 * mailbox command is sent out, and then the lpfc_els_free_iocb() routine is
5248 * invoked to release the IOCB.
5249 **/
5250 static void
5251 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
5252 struct lpfc_iocbq *rspiocb)
5253 {
5254 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
5255 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
5256 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
5257 IOCB_t *irsp;
5258 LPFC_MBOXQ_t *mbox = NULL;
5259 u32 ulp_status, ulp_word4, tmo, did, iotag;
5260
5261 if (!vport) {
5262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5263 "3177 ELS response failed\n");
5264 goto out;
5265 }
5266 if (cmdiocb->context_un.mbox)
5267 mbox = cmdiocb->context_un.mbox;
5268
5269 ulp_status = get_job_ulpstatus(phba, rspiocb);
5270 ulp_word4 = get_job_word4(phba, rspiocb);
5271 did = get_job_els_rsp64_did(phba, cmdiocb);
5272
5273 if (phba->sli_rev == LPFC_SLI_REV4) {
5274 tmo = get_wqe_tmo(cmdiocb);
5275 iotag = get_wqe_reqtag(cmdiocb);
5276 } else {
5277 irsp = &rspiocb->iocb;
5278 tmo = irsp->ulpTimeout;
5279 iotag = irsp->ulpIoTag;
5280 }
5281
5282 /* Check to see if link went down during discovery */
5283 if (!ndlp || lpfc_els_chk_latt(vport)) {
5284 if (mbox)
5285 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5286 goto out;
5287 }
5288
5289 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5290 "ELS rsp cmpl: status:x%x/x%x did:x%x",
5291 ulp_status, ulp_word4, did);
5292 /* ELS response tag <ulpIoTag> completes */
5293 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5294 "0110 ELS response tag x%x completes "
5295 "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
5296 iotag, ulp_status, ulp_word4, tmo,
5297 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5298 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
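/* The ACC response carried a deferred mailbox command (typically a
 * REG_LOGIN); issue it now that the ELS response has completed.
 */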
5299 if (mbox) {
5300 if (ulp_status == 0
5301 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
5302 if (!lpfc_unreg_rpi(vport, ndlp) &&
5303 (!(vport->fc_flag & FC_PT2PT))) {
5304 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5305 ndlp->nlp_state ==
5306 NLP_STE_REG_LOGIN_ISSUE) {
5307 lpfc_printf_vlog(vport, KERN_INFO,
5308 LOG_DISCOVERY,
5309 "0314 PLOGI recov "
5310 "DID x%x "
5311 "Data: x%x x%x x%x\n",
5312 ndlp->nlp_DID,
5313 ndlp->nlp_state,
5314 ndlp->nlp_rpi,
5315 ndlp->nlp_flag);
5316 goto out_free_mbox;
5317 }
5318 }
5319
5320 /* Increment reference count to ndlp to hold the
5321 * reference to ndlp for the callback function.
5322 */
5323 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5324 if (!mbox->ctx_ndlp)
5325 goto out_free_mbox;
5326
5327 mbox->vport = vport;
5328 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
5329 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5330 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5331 }
5332 else {
5333 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
5334 ndlp->nlp_prev_state = ndlp->nlp_state;
5335 lpfc_nlp_set_state(vport, ndlp,
5336 NLP_STE_REG_LOGIN_ISSUE);
5337 }
5338
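/* Mark REG_LOGIN as outstanding before posting the mailbox so the
 * mailbox completion handler can clear it.
 */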
5339 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
5340 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
5341 != MBX_NOT_FINISHED)
5342 goto out;
5343
5344 /* Decrement the ndlp reference count we
5345 * set for this failed mailbox command.
5346 */
5347 lpfc_nlp_put(ndlp);
5348 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
5349
5350 /* ELS rsp: Cannot issue reg_login for <NPortid> */
5351 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5352 "0138 ELS rsp: Cannot issue reg_login for x%x "
5353 "Data: x%x x%x x%x\n",
5354 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5355 ndlp->nlp_rpi);
5356 }
5357 out_free_mbox:
5358 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
5359 }
5360 out:
5361 if (ndlp && shost) {
5362 spin_lock_irq(&ndlp->lock);
5363 if (mbox)
5364 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
5365 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
5366 spin_unlock_irq(&ndlp->lock);
5367 }
5368
5369 /* An SLI4 NPIV instance wants to drop the node at this point under
5370 * these conditions and release the RPI.
5371 */
5372 if (phba->sli_rev == LPFC_SLI_REV4 &&
5373 (vport && vport->port_type == LPFC_NPIV_PORT) &&
5374 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
5375 ndlp->nlp_flag & NLP_RELEASE_RPI) {
5376 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
5377 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
5378 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5379 spin_lock_irq(&ndlp->lock);
5380 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5381 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5382 spin_unlock_irq(&ndlp->lock);
5383 lpfc_drop_node(vport, ndlp);
5384 }
5385 }
5386
5387 /* Release the originating I/O reference. */
5388 lpfc_els_free_iocb(phba, cmdiocb);
5389 lpfc_nlp_put(ndlp);
5390 return;
5391 }
5392
5393 /**
5394 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
5395 * @vport: pointer to a host virtual N_Port data structure.
5396 * @flag: the els command code to be accepted.
5397 * @oldiocb: pointer to the original lpfc command iocb data structure.
5398 * @ndlp: pointer to a node-list data structure.
5399 * @mbox: pointer to the driver internal queue element for mailbox command.
5400 *
5401 * This routine prepares and issues an Accept (ACC) response IOCB
5402 * command. It uses the @flag to properly set up the IOCB field for the
5403 * specific ACC response command to be issued and invokes the
5404 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
5405 * @mbox pointer is passed in, it will be put into the context_un.mbox
5406 * field of the IOCB for the completion callback function to issue the
5407 * mailbox command to the HBA later, when the callback is invoked.
5408 *
5409 * Note that the ndlp reference count will be incremented by 1 for holding the
5410 * ndlp and the reference to ndlp will be stored into the ndlp field of
5411 * the IOCB for the completion callback function to the corresponding
5412 * response ELS IOCB command.
5413 *
5414 * Return code
5415 * 0 - Successfully issued acc response
5416 * 1 - Failed to issue acc response
5417 **/
5418 int
5419 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
5420 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5421 LPFC_MBOXQ_t *mbox)
5422 {
5423 struct lpfc_hba *phba = vport->phba;
5424 IOCB_t *icmd;
5425 IOCB_t *oldcmd;
5426 union lpfc_wqe128 *wqe;
5427 union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
5428 struct lpfc_iocbq *elsiocb;
5429 uint8_t *pcmd;
5430 struct serv_parm *sp;
5431 uint16_t cmdsize;
5432 int rc;
5433 ELS_PKT *els_pkt_ptr;
5434 struct fc_els_rdf_resp *rdf_resp;
5435
5436 switch (flag) {
5437 case ELS_CMD_ACC:
5438 cmdsize = sizeof(uint32_t);
5439 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5440 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5441 if (!elsiocb) {
5442 spin_lock_irq(&ndlp->lock);
5443 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5444 spin_unlock_irq(&ndlp->lock);
5445 return 1;
5446 }
5447
5448 if (phba->sli_rev == LPFC_SLI_REV4) {
5449 wqe = &elsiocb->wqe;
5450 /* XRI / rx_id */
5451 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5452 bf_get(wqe_ctxt_tag,
5453 &oldwqe->xmit_els_rsp.wqe_com));
5454
5455 /* oxid */
5456 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5457 bf_get(wqe_rcvoxid,
5458 &oldwqe->xmit_els_rsp.wqe_com));
5459 } else {
5460 icmd = &elsiocb->iocb;
5461 oldcmd = &oldiocb->iocb;
5462 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5463 icmd->unsli3.rcvsli3.ox_id =
5464 oldcmd->unsli3.rcvsli3.ox_id;
5465 }
5466
5467 pcmd = elsiocb->cmd_dmabuf->virt;
5468 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5469 pcmd += sizeof(uint32_t);
5470
5471 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5472 "Issue ACC: did:x%x flg:x%x",
5473 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5474 break;
5475 case ELS_CMD_FLOGI:
5476 case ELS_CMD_PLOGI:
5477 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
5478 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5479 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5480 if (!elsiocb)
5481 return 1;
5482
5483 if (phba->sli_rev == LPFC_SLI_REV4) {
5484 wqe = &elsiocb->wqe;
5485 /* XRI / rx_id */
5486 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5487 bf_get(wqe_ctxt_tag,
5488 &oldwqe->xmit_els_rsp.wqe_com));
5489
5490 /* oxid */
5491 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5492 bf_get(wqe_rcvoxid,
5493 &oldwqe->xmit_els_rsp.wqe_com));
5494 } else {
5495 icmd = &elsiocb->iocb;
5496 oldcmd = &oldiocb->iocb;
5497 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5498 icmd->unsli3.rcvsli3.ox_id =
5499 oldcmd->unsli3.rcvsli3.ox_id;
5500 }
5501
5502 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5503
5504 if (mbox)
5505 elsiocb->context_un.mbox = mbox;
5506
5507 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5508 pcmd += sizeof(uint32_t);
5509 sp = (struct serv_parm *)pcmd;
5510
5511 if (flag == ELS_CMD_FLOGI) {
5512 /* Copy the received service parameters back */
5513 memcpy(sp, &phba->fc_fabparam,
5514 sizeof(struct serv_parm));
5515
5516 /* Clear the F_Port bit */
5517 sp->cmn.fPort = 0;
5518
5519 /* Mark all class service parameters as invalid */
5520 sp->cls1.classValid = 0;
5521 sp->cls2.classValid = 0;
5522 sp->cls3.classValid = 0;
5523 sp->cls4.classValid = 0;
5524
5525 /* Copy our worldwide names */
5526 memcpy(&sp->portName, &vport->fc_sparam.portName,
5527 sizeof(struct lpfc_name));
5528 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
5529 sizeof(struct lpfc_name));
5530 } else {
5531 memcpy(pcmd, &vport->fc_sparam,
5532 sizeof(struct serv_parm));
5533
5534 sp->cmn.valid_vendor_ver_level = 0;
5535 memset(sp->un.vendorVersion, 0,
5536 sizeof(sp->un.vendorVersion));
5537 sp->cmn.bbRcvSizeMsb &= 0xF;
5538
5539 /* If our firmware supports this feature, convey that
5540 * info to the target using the vendor specific field.
5541 */
5542 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
5543 sp->cmn.valid_vendor_ver_level = 1;
5544 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
5545 sp->un.vv.flags =
5546 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
5547 }
5548 }
5549
5550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5551 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
5552 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5553 break;
5554 case ELS_CMD_PRLO:
5555 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
5556 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5557 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
5558 if (!elsiocb)
5559 return 1;
5560
5561 if (phba->sli_rev == LPFC_SLI_REV4) {
5562 wqe = &elsiocb->wqe;
5563 /* XRI / rx_id */
5564 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5565 bf_get(wqe_ctxt_tag,
5566 &oldwqe->xmit_els_rsp.wqe_com));
5567
5568 /* oxid */
5569 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5570 bf_get(wqe_rcvoxid,
5571 &oldwqe->xmit_els_rsp.wqe_com));
5572 } else {
5573 icmd = &elsiocb->iocb;
5574 oldcmd = &oldiocb->iocb;
5575 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5576 icmd->unsli3.rcvsli3.ox_id =
5577 oldcmd->unsli3.rcvsli3.ox_id;
5578 }
5579
5580 pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
5581
5582 memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
5583 sizeof(uint32_t) + sizeof(PRLO));
5584 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
5585 els_pkt_ptr = (ELS_PKT *) pcmd;
5586 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
5587
5588 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5589 "Issue ACC PRLO: did:x%x flg:x%x",
5590 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5591 break;
5592 case ELS_CMD_RDF:
5593 cmdsize = sizeof(*rdf_resp);
5594 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5595 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5596 if (!elsiocb)
5597 return 1;
5598
5599 if (phba->sli_rev == LPFC_SLI_REV4) {
5600 wqe = &elsiocb->wqe;
5601 /* XRI / rx_id */
5602 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
5603 bf_get(wqe_ctxt_tag,
5604 &oldwqe->xmit_els_rsp.wqe_com));
5605
5606 /* oxid */
5607 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5608 bf_get(wqe_rcvoxid,
5609 &oldwqe->xmit_els_rsp.wqe_com));
5610 } else {
5611 icmd = &elsiocb->iocb;
5612 oldcmd = &oldiocb->iocb;
5613 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5614 icmd->unsli3.rcvsli3.ox_id =
5615 oldcmd->unsli3.rcvsli3.ox_id;
5616 }
5617
5618 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
5619 rdf_resp = (struct fc_els_rdf_resp *)pcmd;
5620 memset(rdf_resp, 0, sizeof(*rdf_resp));
5621 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
5622
5623 /* FC-LS-5 specifies desc_list_len shall be set to 12 */
5624 rdf_resp->desc_list_len = cpu_to_be32(12);
5625
5626 /* FC-LS-5 specifies LS REQ Information descriptor */
5627 rdf_resp->lsri.desc_tag = cpu_to_be32(1);
5628 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
5629 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
5630 break;
5631 default:
5632 return 1;
5633 }
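/* A LOGO that is being ACC'd needs node cleanup in its own completion
 * handler; all other responses use the generic ELS response completion.
 */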
5634 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
5635 spin_lock_irq(&ndlp->lock);
5636 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5637 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
5638 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5639 spin_unlock_irq(&ndlp->lock);
5640 elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
5641 } else {
5642 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5643 }
5644
5645 phba->fc_stat.elsXmitACC++;
5646 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5647 if (!elsiocb->ndlp) {
5648 lpfc_els_free_iocb(phba, elsiocb);
5649 return 1;
5650 }
5651
5652 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5653 if (rc == IOCB_ERROR) {
5654 lpfc_els_free_iocb(phba, elsiocb);
5655 lpfc_nlp_put(ndlp);
5656 return 1;
5657 }
5658
5659 /* Xmit ELS ACC response tag <ulpIoTag> */
5660 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5661 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5662 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5663 "RPI: x%x, fc_flag x%x refcnt %d\n",
5664 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5665 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5666 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
5667 return 0;
5668 }
5669
5670 /**
5671 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
5672 * @vport: pointer to a virtual N_Port data structure.
5673 * @rejectError: reject response to issue
5674 * @oldiocb: pointer to the original lpfc command iocb data structure.
5675 * @ndlp: pointer to a node-list data structure.
5676 * @mbox: pointer to the driver internal queue element for mailbox command.
5677 *
5678 * This routine prepares and issues a Reject (RJT) response IOCB
5679 * command. If a @mbox pointer is passed in, it will be put into the
5680 * context_un.mbox field of the IOCB for the completion callback function
5681 * to issue to the HBA later.
5682 *
5683 * Note that the ndlp reference count will be incremented by 1 for holding the
5684 * ndlp and the reference to ndlp will be stored into the ndlp field of
5685 * the IOCB for the completion callback function to the reject response
5686 * ELS IOCB command.
5687 *
5688 * Return code
5689 * 0 - Successfully issued reject response
5690 * 1 - Failed to issue reject response
5691 **/
5692 int
5693 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
5694 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5695 LPFC_MBOXQ_t *mbox)
5696 {
5697 int rc;
5698 struct lpfc_hba *phba = vport->phba;
5699 IOCB_t *icmd;
5700 IOCB_t *oldcmd;
5701 union lpfc_wqe128 *wqe;
5702 struct lpfc_iocbq *elsiocb;
5703 uint8_t *pcmd;
5704 uint16_t cmdsize;
5705
5706 cmdsize = 2 * sizeof(uint32_t);
5707 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5708 ndlp->nlp_DID, ELS_CMD_LS_RJT);
5709 if (!elsiocb)
5710 return 1;
5711
5712 if (phba->sli_rev == LPFC_SLI_REV4) {
5713 wqe = &elsiocb->wqe;
5714 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5715 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
5716 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5717 get_job_rcvoxid(phba, oldiocb));
5718 } else {
5719 icmd = &elsiocb->iocb;
5720 oldcmd = &oldiocb->iocb;
5721 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5722 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5723 }
5724
5725 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
5726
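/* LS_RJT payload: word 0 is the command code, word 1 carries the
 * reason code and explanation supplied by the caller.
 */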
5727 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5728 pcmd += sizeof(uint32_t);
5729 *((uint32_t *) (pcmd)) = rejectError;
5730
5731 if (mbox)
5732 elsiocb->context_un.mbox = mbox;
5733
5734 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
5735 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5736 "0129 Xmit ELS RJT x%x response tag x%x "
5737 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5738 "rpi x%x\n",
5739 rejectError, elsiocb->iotag,
5740 get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
5741 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
5742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5743 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
5744 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
5745
5746 phba->fc_stat.elsXmitLSRJT++;
5747 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5748 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5749 if (!elsiocb->ndlp) {
5750 lpfc_els_free_iocb(phba, elsiocb);
5751 return 1;
5752 }
5753
5754 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
5755 * node's assigned RPI gets released provided this node is not already
5756 * registered with the transport.
5757 */
5758 if (phba->sli_rev == LPFC_SLI_REV4 &&
5759 vport->port_type == LPFC_NPIV_PORT &&
5760 !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
5761 spin_lock_irq(&ndlp->lock);
5762 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5763 spin_unlock_irq(&ndlp->lock);
5764 }
5765
5766 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5767 if (rc == IOCB_ERROR) {
5768 lpfc_els_free_iocb(phba, elsiocb);
5769 lpfc_nlp_put(ndlp);
5770 return 1;
5771 }
5772
5773 return 0;
5774 }
5775
5776 /**
5777 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric.
5778 * @vport: pointer to a host virtual N_Port data structure.
5779 * @cmdiocb: pointer to the original lpfc command iocb data structure.
5780 * @ndlp: NPort to where rsp is directed
5781 *
5782 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
5783 * this N_Port's support of hardware signals in its Congestion
5784 * Capabilities Descriptor.
5785 *
5786 * Return code
5787 * 0 - Successfully issued edc rsp command
5788 * 1 - Failed to issue edc rsp command
5789 **/
5790 static int
5791 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5792 struct lpfc_nodelist *ndlp)
5793 {
5794 struct lpfc_hba *phba = vport->phba;
5795 struct lpfc_els_edc_rsp *edc_rsp;
5796 struct lpfc_iocbq *elsiocb;
5797 IOCB_t *icmd, *cmd;
5798 union lpfc_wqe128 *wqe;
5799 uint8_t *pcmd;
5800 int cmdsize, rc;
5801
5802 cmdsize = sizeof(struct lpfc_els_edc_rsp);
5803 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
5804 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5805 if (!elsiocb)
5806 return 1;
5807
5808 if (phba->sli_rev == LPFC_SLI_REV4) {
5809 wqe = &elsiocb->wqe;
5810 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5811 get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
5812 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5813 get_job_rcvoxid(phba, cmdiocb));
5814 } else {
5815 icmd = &elsiocb->iocb;
5816 cmd = &cmdiocb->iocb;
5817 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
5818 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
5819 }
5820
5821 pcmd = elsiocb->cmd_dmabuf->virt;
5822 memset(pcmd, 0, cmdsize);
5823
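/* EDC LS_ACC layout: an LS Request Information descriptor followed by
 * this port's congestion capabilities descriptor.
 */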
5824 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
5825 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
5826 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
5827 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
5828 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
5829 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
5830 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
5831 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
5832 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
5833
5834 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5835 "Issue EDC ACC: did:x%x flg:x%x refcnt %d",
5836 ndlp->nlp_DID, ndlp->nlp_flag,
5837 kref_read(&ndlp->kref));
5838 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5839
5840 phba->fc_stat.elsXmitACC++;
5841 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5842 if (!elsiocb->ndlp) {
5843 lpfc_els_free_iocb(phba, elsiocb);
5844 return 1;
5845 }
5846
5847 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5848 if (rc == IOCB_ERROR) {
5849 lpfc_els_free_iocb(phba, elsiocb);
5850 lpfc_nlp_put(ndlp);
5851 return 1;
5852 }
5853
5854 /* Xmit ELS ACC response tag <ulpIoTag> */
5855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5856 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
5857 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5858 "RPI: x%x, fc_flag x%x\n",
5859 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5860 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5861 ndlp->nlp_rpi, vport->fc_flag);
5862
5863 return 0;
5864 }
5865
5866 /**
5867 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
5868 * @vport: pointer to a virtual N_Port data structure.
5869 * @oldiocb: pointer to the original lpfc command iocb data structure.
5870 * @ndlp: pointer to a node-list data structure.
5871 *
5872 * This routine prepares and issues an Accept (ACC) response to Address
5873 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
5874 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5875 *
5876 * Note that the ndlp reference count will be incremented by 1 for holding the
5877 * ndlp and the reference to ndlp will be stored into the ndlp field of
5878 * the IOCB for the completion callback function to the ADISC Accept response
5879 * ELS IOCB command.
5880 *
5881 * Return code
5882 * 0 - Successfully issued acc adisc response
5883 * 1 - Failed to issue adisc acc response
5884 **/
5885 int
5886 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5887 struct lpfc_nodelist *ndlp)
5888 {
5889 struct lpfc_hba *phba = vport->phba;
5890 ADISC *ap;
5891 IOCB_t *icmd, *oldcmd;
5892 union lpfc_wqe128 *wqe;
5893 struct lpfc_iocbq *elsiocb;
5894 uint8_t *pcmd;
5895 uint16_t cmdsize;
5896 int rc;
5897 u32 ulp_context;
5898
5899 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
5900 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5901 ndlp->nlp_DID, ELS_CMD_ACC);
5902 if (!elsiocb)
5903 return 1;
5904
5905 if (phba->sli_rev == LPFC_SLI_REV4) {
5906 wqe = &elsiocb->wqe;
5907 /* XRI / rx_id */
5908 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5909 get_job_ulpcontext(phba, oldiocb));
5910 ulp_context = get_job_ulpcontext(phba, elsiocb);
5911 /* oxid */
5912 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
5913 get_job_rcvoxid(phba, oldiocb));
5914 } else {
5915 icmd = &elsiocb->iocb;
5916 oldcmd = &oldiocb->iocb;
5917 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5918 ulp_context = elsiocb->iocb.ulpContext;
5919 icmd->unsli3.rcvsli3.ox_id =
5920 oldcmd->unsli3.rcvsli3.ox_id;
5921 }
5922
5923 /* Xmit ADISC ACC response tag <ulpIoTag> */
5924 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5925 "0130 Xmit ADISC ACC response iotag x%x xri: "
5926 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
5927 elsiocb->iotag, ulp_context,
5928 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5929 ndlp->nlp_rpi);
5930 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
5931
5932 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5933 pcmd += sizeof(uint32_t);
5934
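/* ADISC ACC payload: hard AL_PA plus this vport's port name, node name
 * and N_Port ID.
 */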
5935 ap = (ADISC *) (pcmd);
5936 ap->hardAL_PA = phba->fc_pref_ALPA;
5937 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
5938 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
5939 ap->DID = be32_to_cpu(vport->fc_myDID);
5940
5941 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5942 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
5943 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5944
5945 phba->fc_stat.elsXmitACC++;
5946 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
5947 elsiocb->ndlp = lpfc_nlp_get(ndlp);
5948 if (!elsiocb->ndlp) {
5949 lpfc_els_free_iocb(phba, elsiocb);
5950 return 1;
5951 }
5952
5953 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5954 if (rc == IOCB_ERROR) {
5955 lpfc_els_free_iocb(phba, elsiocb);
5956 lpfc_nlp_put(ndlp);
5957 return 1;
5958 }
5959
5960 return 0;
5961 }
5962
5963 /**
5964 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
5965 * @vport: pointer to a virtual N_Port data structure.
5966 * @oldiocb: pointer to the original lpfc command iocb data structure.
5967 * @ndlp: pointer to a node-list data structure.
5968 *
5969 * This routine prepares and issues an Accept (ACC) response to Process
5970 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
5971 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5972 *
5973 * Note that the ndlp reference count will be incremented by 1 for holding the
5974 * ndlp and the reference to ndlp will be stored into the ndlp field of
5975 * the IOCB for the completion callback function to the PRLI Accept response
5976 * ELS IOCB command.
5977 *
5978 * Return code
5979 * 0 - Successfully issued acc prli response
5980 * 1 - Failed to issue acc prli response
5981 **/
5982 int
5983 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5984 struct lpfc_nodelist *ndlp)
5985 {
5986 struct lpfc_hba *phba = vport->phba;
5987 PRLI *npr;
5988 struct lpfc_nvme_prli *npr_nvme;
5989 lpfc_vpd_t *vpd;
5990 IOCB_t *icmd;
5991 IOCB_t *oldcmd;
5992 union lpfc_wqe128 *wqe;
5993 struct lpfc_iocbq *elsiocb;
5994 uint8_t *pcmd;
5995 uint16_t cmdsize;
5996 uint32_t prli_fc4_req, *req_payload;
5997 struct lpfc_dmabuf *req_buf;
5998 int rc;
5999 u32 elsrspcmd, ulp_context;
6000
6001 /* Need the incoming PRLI payload to determine if the ACC is for an
6002 * FC4 or NVME PRLI type. The PRLI type is at word 1.
6003 */
6004 req_buf = oldiocb->cmd_dmabuf;
6005 req_payload = (((uint32_t *)req_buf->virt) + 1);
6006
6007 /* PRLI type payload is at byte 3 for FCP or NVME. */
6008 prli_fc4_req = be32_to_cpu(*req_payload);
6009 prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
6010 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6011 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
6012 prli_fc4_req, *((uint32_t *)req_payload));
6013
6014 if (prli_fc4_req == PRLI_FCP_TYPE) {
6015 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
6016 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
6017 } else if (prli_fc4_req & PRLI_NVME_TYPE) {
6018 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
6019 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
6020 } else {
6021 return 1;
6022 }
6023
6024 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6025 ndlp->nlp_DID, elsrspcmd);
6026 if (!elsiocb)
6027 return 1;
6028
6029 if (phba->sli_rev == LPFC_SLI_REV4) {
6030 wqe = &elsiocb->wqe;
6031 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6032 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6033 ulp_context = get_job_ulpcontext(phba, elsiocb);
6034 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6035 get_job_rcvoxid(phba, oldiocb));
6036 } else {
6037 icmd = &elsiocb->iocb;
6038 oldcmd = &oldiocb->iocb;
6039 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6040 ulp_context = elsiocb->iocb.ulpContext;
6041 icmd->unsli3.rcvsli3.ox_id =
6042 oldcmd->unsli3.rcvsli3.ox_id;
6043 }
6044
6045 /* Xmit PRLI ACC response tag <ulpIoTag> */
6046 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6047 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
6048 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6049 elsiocb->iotag, ulp_context,
6050 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6051 ndlp->nlp_rpi);
6052 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6053 memset(pcmd, 0, cmdsize);
6054
6055 *((uint32_t *)(pcmd)) = elsrspcmd;
6056 pcmd += sizeof(uint32_t);
6057
6058 /* For PRLI, remainder of payload is PRLI parameter page */
6059 vpd = &phba->vpd;
6060
6061 if (prli_fc4_req == PRLI_FCP_TYPE) {
6062 /*
6063 * If the remote port is a target and our firmware version
6064 * is 3.20 or later, set the following bits for FC-TAPE
6065 * support.
6066 */
6067 npr = (PRLI *) pcmd;
6068 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
6069 (vpd->rev.feaLevelHigh >= 0x02)) {
6070 npr->ConfmComplAllowed = 1;
6071 npr->Retry = 1;
6072 npr->TaskRetryIdReq = 1;
6073 }
6074 npr->acceptRspCode = PRLI_REQ_EXECUTED;
6075 npr->estabImagePair = 1;
6076 npr->readXferRdyDis = 1;
6077 npr->ConfmComplAllowed = 1;
6078 npr->prliType = PRLI_FCP_TYPE;
6079 npr->initiatorFunc = 1;
6080 } else if (prli_fc4_req & PRLI_NVME_TYPE) {
6081 /* Respond with an NVME PRLI Type */
6082 npr_nvme = (struct lpfc_nvme_prli *) pcmd;
6083 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
6084 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
6085 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
6086 if (phba->nvmet_support) {
6087 bf_set(prli_tgt, npr_nvme, 1);
6088 bf_set(prli_disc, npr_nvme, 1);
6089 if (phba->cfg_nvme_enable_fb) {
6090 bf_set(prli_fba, npr_nvme, 1);
6091
6092 /* TBD. Target mode needs to post buffers
6093 * that support the configured first burst
6094 * byte size.
6095 */
6096 bf_set(prli_fb_sz, npr_nvme,
6097 phba->cfg_nvmet_fb_size);
6098 }
6099 } else {
6100 bf_set(prli_init, npr_nvme, 1);
6101 }
6102
6103 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
6104 "6015 NVME issue PRLI ACC word1 x%08x "
6105 "word4 x%08x word5 x%08x flag x%x, "
6106 "fcp_info x%x nlp_type x%x\n",
6107 npr_nvme->word1, npr_nvme->word4,
6108 npr_nvme->word5, ndlp->nlp_flag,
6109 ndlp->nlp_fcp_info, ndlp->nlp_type);
6110 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
6111 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
6112 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
6113 } else
6114 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6115 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
6116 prli_fc4_req, ndlp->nlp_fc4_type,
6117 ndlp->nlp_DID);
6118
6119 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6120 "Issue ACC PRLI: did:x%x flg:x%x",
6121 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6122
6123 phba->fc_stat.elsXmitACC++;
6124 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6125 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6126 if (!elsiocb->ndlp) {
6127 lpfc_els_free_iocb(phba, elsiocb);
6128 return 1;
6129 }
6130
6131 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6132 if (rc == IOCB_ERROR) {
6133 lpfc_els_free_iocb(phba, elsiocb);
6134 lpfc_nlp_put(ndlp);
6135 return 1;
6136 }
6137
6138 return 0;
6139 }
6140
6141 /**
6142 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
6143 * @vport: pointer to a virtual N_Port data structure.
6144 * @format: rnid command format.
6145 * @oldiocb: pointer to the original lpfc command iocb data structure.
6146 * @ndlp: pointer to a node-list data structure.
6147 *
6148 * This routine issues a Request Node Identification Data (RNID) Accept
6149 * (ACC) response. It constructs the RNID ACC response command according to
6150 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
6151 * issue the response.
6152 *
6153 * Note that the ndlp reference count will be incremented by 1 for holding the
6154 * ndlp and the reference to ndlp will be stored into the ndlp field of
6155 * the IOCB for the completion callback function.
6156 *
6157 * Return code
6158 * 0 - Successfully issued acc rnid response
6159 * 1 - Failed to issue acc rnid response
6160 **/
6161 static int
6162 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
6163 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6164 {
6165 struct lpfc_hba *phba = vport->phba;
6166 RNID *rn;
6167 IOCB_t *icmd, *oldcmd;
6168 union lpfc_wqe128 *wqe;
6169 struct lpfc_iocbq *elsiocb;
6170 uint8_t *pcmd;
6171 uint16_t cmdsize;
6172 int rc;
6173 u32 ulp_context;
6174
6175 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
6176 + (2 * sizeof(struct lpfc_name));
6177 if (format)
6178 cmdsize += sizeof(RNID_TOP_DISC);
6179
6180 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6181 ndlp->nlp_DID, ELS_CMD_ACC);
6182 if (!elsiocb)
6183 return 1;
6184
6185 if (phba->sli_rev == LPFC_SLI_REV4) {
6186 wqe = &elsiocb->wqe;
6187 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6188 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6189 ulp_context = get_job_ulpcontext(phba, elsiocb);
6190 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6191 get_job_rcvoxid(phba, oldiocb));
6192 } else {
6193 icmd = &elsiocb->iocb;
6194 oldcmd = &oldiocb->iocb;
6195 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6196 ulp_context = elsiocb->iocb.ulpContext;
6197 icmd->unsli3.rcvsli3.ox_id =
6198 oldcmd->unsli3.rcvsli3.ox_id;
6199 }
6200
6201 /* Xmit RNID ACC response tag <ulpIoTag> */
6202 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6203 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
6204 elsiocb->iotag, ulp_context);
6205 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6206 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6207 pcmd += sizeof(uint32_t);
6208
6209 memset(pcmd, 0, sizeof(RNID));
6210 rn = (RNID *) (pcmd);
6211 rn->Format = format;
6212 rn->CommonLen = (2 * sizeof(struct lpfc_name));
6213 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
6214 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
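/* Common identification data is returned for the known formats; topology
 * discovery data is appended only for the RNID_TOPOLOGY_DISC format.
 */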
6215 switch (format) {
6216 case 0:
6217 rn->SpecificLen = 0;
6218 break;
6219 case RNID_TOPOLOGY_DISC:
6220 rn->SpecificLen = sizeof(RNID_TOP_DISC);
6221 memcpy(&rn->un.topologyDisc.portName,
6222 &vport->fc_portname, sizeof(struct lpfc_name));
6223 rn->un.topologyDisc.unitType = RNID_HBA;
6224 rn->un.topologyDisc.physPort = 0;
6225 rn->un.topologyDisc.attachedNodes = 0;
6226 break;
6227 default:
6228 rn->CommonLen = 0;
6229 rn->SpecificLen = 0;
6230 break;
6231 }
6232
6233 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6234 "Issue ACC RNID: did:x%x flg:x%x refcnt %d",
6235 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6236
6237 phba->fc_stat.elsXmitACC++;
6238 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6239 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6240 if (!elsiocb->ndlp) {
6241 lpfc_els_free_iocb(phba, elsiocb);
6242 return 1;
6243 }
6244
6245 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6246 if (rc == IOCB_ERROR) {
6247 lpfc_els_free_iocb(phba, elsiocb);
6248 lpfc_nlp_put(ndlp);
6249 return 1;
6250 }
6251
6252 return 0;
6253 }
6254
6255 /**
6256 * lpfc_els_clear_rrq - Clear the exchange that this RRQ describes.
6257 * @vport: pointer to a virtual N_Port data structure.
6258 * @iocb: pointer to the lpfc command iocb data structure.
6259 * @ndlp: pointer to a node-list data structure.
6260 *
6261 * Return: none
6262 **/
6263 static void
6264 lpfc_els_clear_rrq(struct lpfc_vport *vport,
6265 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
6266 {
6267 struct lpfc_hba *phba = vport->phba;
6268 uint8_t *pcmd;
6269 struct RRQ *rrq;
6270 uint16_t rxid;
6271 uint16_t xri;
6272 struct lpfc_node_rrq *prrq;
6273
6274
6275 pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
6276 pcmd += sizeof(uint32_t);
6277 rrq = (struct RRQ *)pcmd;
6278 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
6279 rxid = bf_get(rrq_rxid, rrq);
6280
6281 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6282 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
6283 " x%x x%x\n",
6284 be32_to_cpu(bf_get(rrq_did, rrq)),
6285 bf_get(rrq_oxid, rrq),
6286 rxid,
6287 get_wqe_reqtag(iocb),
6288 get_job_ulpcontext(phba, iocb));
6289
6290 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6291 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
6292 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
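/* If this port originated the exchange, it is tracked locally by its
 * OX_ID; otherwise the RX_ID identifies it.
 */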
6293 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
6294 xri = bf_get(rrq_oxid, rrq);
6295 else
6296 xri = rxid;
6297 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
6298 if (prrq)
6299 lpfc_clr_rrq_active(phba, xri, prrq);
6300 return;
6301 }
6302
6303 /**
6304 * lpfc_els_rsp_echo_acc - Issue echo acc response
6305 * @vport: pointer to a virtual N_Port data structure.
6306 * @data: pointer to echo data to return in the accept.
6307 * @oldiocb: pointer to the original lpfc command iocb data structure.
6308 * @ndlp: pointer to a node-list data structure.
6309 *
6310 * Return code
6311 * 0 - Successfully issued acc echo response
6312 * 1 - Failed to issue acc echo response
6313 **/
6314 static int
6315 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
6316 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
6317 {
6318 struct lpfc_hba *phba = vport->phba;
6319 IOCB_t *icmd, *oldcmd;
6320 union lpfc_wqe128 *wqe;
6321 struct lpfc_iocbq *elsiocb;
6322 uint8_t *pcmd;
6323 uint16_t cmdsize;
6324 int rc;
6325 u32 ulp_context;
6326
6327 if (phba->sli_rev == LPFC_SLI_REV4)
6328 cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
6329 else
6330 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
6331
6332 /* The accumulated length can exceed the BPL_SIZE. For
6333 * now, use this as the limit
6334 */
6335 if (cmdsize > LPFC_BPL_SIZE)
6336 cmdsize = LPFC_BPL_SIZE;
6337 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
6338 ndlp->nlp_DID, ELS_CMD_ACC);
6339 if (!elsiocb)
6340 return 1;
6341
6342 if (phba->sli_rev == LPFC_SLI_REV4) {
6343 wqe = &elsiocb->wqe;
6344 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
6345 get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
6346 ulp_context = get_job_ulpcontext(phba, elsiocb);
6347 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6348 get_job_rcvoxid(phba, oldiocb));
6349 } else {
6350 icmd = &elsiocb->iocb;
6351 oldcmd = &oldiocb->iocb;
6352 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6353 ulp_context = elsiocb->iocb.ulpContext;
6354 icmd->unsli3.rcvsli3.ox_id =
6355 oldcmd->unsli3.rcvsli3.ox_id;
6356 }
6357
6358 /* Xmit ECHO ACC response tag <ulpIoTag> */
6359 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6360 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6361 elsiocb->iotag, ulp_context);
6362 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
6363 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6364 pcmd += sizeof(uint32_t);
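/* Echo back the received payload, excluding the command word. */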
6365 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
6366
6367 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6368 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
6369 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
6370
6371 phba->fc_stat.elsXmitACC++;
6372 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
6373 elsiocb->ndlp = lpfc_nlp_get(ndlp);
6374 if (!elsiocb->ndlp) {
6375 lpfc_els_free_iocb(phba, elsiocb);
6376 return 1;
6377 }
6378
6379 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6380 if (rc == IOCB_ERROR) {
6381 lpfc_els_free_iocb(phba, elsiocb);
6382 lpfc_nlp_put(ndlp);
6383 return 1;
6384 }
6385
6386 return 0;
6387 }
6388
6389 /**
6390 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
6391 * @vport: pointer to a host virtual N_Port data structure.
6392 *
6393 * This routine issues Address Discover (ADISC) ELS commands to those
6394 * N_Ports which are in node port recovery state and ADISC has not been issued
6395 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
6396 * lpfc_issue_els_adisc() routine, the per @vport number of discover count
6397 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches the
6398 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
6399 * be marked with the FC_NLP_MORE bit and issuing of the remaining ADISC
6400 * IOCBs is deferred for a later pass. On the other hand, if all the ndlps
6401 * of the @vport have been walked through and no ADISC IOCB was issued, the
6402 * FC_NLP_MORE bit in the @vport fc_flag shall be cleared, indicating there
6403 * are no more ADISCs to be sent.
6404 *
6405 * Return code
6406 * The number of N_Ports with adisc issued.
6407 **/
6408 int
6409 lpfc_els_disc_adisc(struct lpfc_vport *vport)
6410 {
6411 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6412 struct lpfc_nodelist *ndlp, *next_ndlp;
6413 int sentadisc = 0;
6414
6415 /* go thru NPR nodes and issue any remaining ELS ADISCs */
6416 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6417
6418 if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
6419 !(ndlp->nlp_flag & NLP_NPR_ADISC))
6420 continue;
6421
6422 spin_lock_irq(&ndlp->lock);
6423 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
6424 spin_unlock_irq(&ndlp->lock);
6425
6426 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
6427 /* This node was marked for ADISC but was not picked
6428 * for discovery. This is possible if the node was
6429 * missing in the GID_FT response.
6430 *
6431 * At the time of marking the node for ADISC, we skipped
6432 * the unreg from the backend.
6433 */
6434 lpfc_nlp_unreg_node(vport, ndlp);
6435 lpfc_unreg_rpi(vport, ndlp);
6436 continue;
6437 }
6438
6439 ndlp->nlp_prev_state = ndlp->nlp_state;
6440 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
6441 lpfc_issue_els_adisc(vport, ndlp, 0);
6442 sentadisc++;
6443 vport->num_disc_nodes++;
6444 if (vport->num_disc_nodes >=
6445 vport->cfg_discovery_threads) {
6446 spin_lock_irq(shost->host_lock);
6447 vport->fc_flag |= FC_NLP_MORE;
6448 spin_unlock_irq(shost->host_lock);
6449 break;
6450 }
6451
6452 }
6453 if (sentadisc == 0) {
6454 spin_lock_irq(shost->host_lock);
6455 vport->fc_flag &= ~FC_NLP_MORE;
6456 spin_unlock_irq(shost->host_lock);
6457 }
6458 return sentadisc;
6459 }
6460
6461 /**
6462 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
6463 * @vport: pointer to a host virtual N_Port data structure.
6464 *
6465 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
6466 * which are in node port recovery state on a @vport. Each time an ELS
6467 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
6468 * the per @vport discover count (num_disc_nodes) shall be
6469 * incremented. If num_disc_nodes reaches the pre-configured threshold
6470 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
6471 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs is deferred
6472 * for a later pass. On the other hand, if all the ndlps of the @vport
6473 * have been walked through and no PLOGI IOCB was issued, the FC_NLP_MORE
6474 * bit in the @vport fc_flag shall be cleared, indicating there are no
6475 * more PLOGIs to be sent.
6476 *
6477 * Return code
6478 * The number of N_Ports with plogi issued.
6479 **/
6480 int
6481 lpfc_els_disc_plogi(struct lpfc_vport *vport)
6482 {
6483 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6484 struct lpfc_nodelist *ndlp, *next_ndlp;
6485 int sentplogi = 0;
6486
6487 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
6488 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
6489 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
6490 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
6491 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
6492 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
6493 ndlp->nlp_prev_state = ndlp->nlp_state;
6494 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6495 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
6496 sentplogi++;
6497 vport->num_disc_nodes++;
6498 if (vport->num_disc_nodes >=
6499 vport->cfg_discovery_threads) {
6500 spin_lock_irq(shost->host_lock);
6501 vport->fc_flag |= FC_NLP_MORE;
6502 spin_unlock_irq(shost->host_lock);
6503 break;
6504 }
6505 }
6506 }
6507
6508 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6509 "6452 Discover PLOGI %d flag x%x\n",
6510 sentplogi, vport->fc_flag);
6511
6512 if (sentplogi) {
6513 lpfc_set_disctmo(vport);
6514 }
6515 else {
6516 spin_lock_irq(shost->host_lock);
6517 vport->fc_flag &= ~FC_NLP_MORE;
6518 spin_unlock_irq(shost->host_lock);
6519 }
6520 return sentplogi;
6521 }
6522
6523 static uint32_t
6524 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
6525 uint32_t word0)
6526 {
6527
6528 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
6529 desc->payload.els_req = word0;
6530 desc->length = cpu_to_be32(sizeof(desc->payload));
6531
6532 return sizeof(struct fc_rdp_link_service_desc);
6533 }
6534
6535 static uint32_t
6536 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
6537 uint8_t *page_a0, uint8_t *page_a2)
6538 {
6539 uint16_t wavelength;
6540 uint16_t temperature;
6541 uint16_t rx_power;
6542 uint16_t tx_bias;
6543 uint16_t tx_power;
6544 uint16_t vcc;
6545 uint16_t flag = 0;
6546 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
6547 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
6548
6549 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
6550
6551 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
6552 &page_a0[SSF_TRANSCEIVER_CODE_B4];
6553 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
6554 &page_a0[SSF_TRANSCEIVER_CODE_B5];
6555
6556 if ((trasn_code_byte4->fc_sw_laser) ||
6557 (trasn_code_byte5->fc_sw_laser_sl) ||
6558 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if it's short WL */
6559 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
6560 } else if (trasn_code_byte4->fc_lw_laser) {
6561 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
6562 page_a0[SSF_WAVELENGTH_B0];
6563 if (wavelength == SFP_WAVELENGTH_LC1310)
6564 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
6565 if (wavelength == SFP_WAVELENGTH_LL1550)
6566 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
6567 }
6568 /* check if it's SFP+ */
6569 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
6570 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
6571 << SFP_FLAG_CT_SHIFT;
6572
6573 /* check if it's OPTICAL */
6574 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
6575 SFP_FLAG_IS_OPTICAL_PORT : 0)
6576 << SFP_FLAG_IS_OPTICAL_SHIFT;
6577
6578 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
6579 page_a2[SFF_TEMPERATURE_B0]);
6580 vcc = (page_a2[SFF_VCC_B1] << 8 |
6581 page_a2[SFF_VCC_B0]);
6582 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
6583 page_a2[SFF_TXPOWER_B0]);
6584 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
6585 page_a2[SFF_TX_BIAS_CURRENT_B0]);
6586 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
6587 page_a2[SFF_RXPOWER_B0]);
6588 desc->sfp_info.temperature = cpu_to_be16(temperature);
6589 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
6590 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
6591 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
6592 desc->sfp_info.vcc = cpu_to_be16(vcc);
6593
6594 desc->sfp_info.flags = cpu_to_be16(flag);
6595 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
6596
6597 return sizeof(struct fc_rdp_sfp_desc);
6598 }
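/*
 * Note on the byte handling above (illustrative, not part of the driver):
 * each 16-bit diagnostic value is rebuilt from two consecutive bytes of
 * SFF-8472 page A2 (the *_B1 byte supplying the high-order bits) and is
 * then stored in wire (big-endian) order in the RDP descriptor:
 *
 *	u16 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8) |
 *			   page_a2[SFF_TEMPERATURE_B0];
 *	desc->sfp_info.temperature = cpu_to_be16(temperature);
 */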
6599
6600 static uint32_t
6601 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
6602 READ_LNK_VAR *stat)
6603 {
6604 uint32_t type;
6605
6606 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
6607
6608 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
6609
6610 desc->info.port_type = cpu_to_be32(type);
6611
6612 desc->info.link_status.link_failure_cnt =
6613 cpu_to_be32(stat->linkFailureCnt);
6614 desc->info.link_status.loss_of_synch_cnt =
6615 cpu_to_be32(stat->lossSyncCnt);
6616 desc->info.link_status.loss_of_signal_cnt =
6617 cpu_to_be32(stat->lossSignalCnt);
6618 desc->info.link_status.primitive_seq_proto_err =
6619 cpu_to_be32(stat->primSeqErrCnt);
6620 desc->info.link_status.invalid_trans_word =
6621 cpu_to_be32(stat->invalidXmitWord);
6622 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
6623
6624 desc->length = cpu_to_be32(sizeof(desc->info));
6625
6626 return sizeof(struct fc_rdp_link_error_status_desc);
6627 }
6628
6629 static uint32_t
6630 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
6631 struct lpfc_vport *vport)
6632 {
6633 uint32_t bbCredit;
6634
6635 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
6636
6637 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
6638 (vport->fc_sparam.cmn.bbCreditMsb << 8);
6639 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
6640 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
6641 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
6642 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
6643 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
6644 } else {
6645 desc->bbc_info.attached_port_bbc = 0;
6646 }
6647
6648 desc->bbc_info.rtt = 0;
6649 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
6650
6651 return sizeof(struct fc_rdp_bbc_desc);
6652 }
6653
6654 static uint32_t
6655 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
6656 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
6657 {
6658 uint32_t flags = 0;
6659
6660 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6661
6662 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
6663 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
6664 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
6665 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
6666
6667 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6668 flags |= RDP_OET_HIGH_ALARM;
6669 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6670 flags |= RDP_OET_LOW_ALARM;
6671 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
6672 flags |= RDP_OET_HIGH_WARNING;
6673 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
6674 flags |= RDP_OET_LOW_WARNING;
6675
6676 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
6677 desc->oed_info.function_flags = cpu_to_be32(flags);
6678 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6679 return sizeof(struct fc_rdp_oed_sfp_desc);
6680 }
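/*
 * The OED (Optical Element Data) descriptor builders below follow the same
 * shape as the temperature one above; only the page A2 threshold offsets,
 * the transgression bits tested, and the RDP_OED_* element type change.
 * A generic sketch (not part of the driver; transgression_flags() and the
 * *_off index names are hypothetical):
 *
 *	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
 *	desc->oed_info.hi_alarm   = page_a2[hi_alarm_off];
 *	desc->oed_info.lo_alarm   = page_a2[lo_alarm_off];
 *	desc->oed_info.hi_warning = page_a2[hi_warn_off];
 *	desc->oed_info.lo_warning = page_a2[lo_warn_off];
 *	flags  = transgression_flags(phba, element);	// RDP_OET_* bits
 *	flags |= (0xf & oed_type) << RDP_OED_TYPE_SHIFT;
 *	desc->oed_info.function_flags = cpu_to_be32(flags);
 *	desc->length = cpu_to_be32(sizeof(desc->oed_info));
 */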
6681
6682 static uint32_t
6683 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
6684 struct fc_rdp_oed_sfp_desc *desc,
6685 uint8_t *page_a2)
6686 {
6687 uint32_t flags = 0;
6688
6689 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6690
6691 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
6692 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
6693 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
6694 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
6695
6696 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6697 flags |= RDP_OET_HIGH_ALARM;
6698 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6699 flags |= RDP_OET_LOW_ALARM;
6700 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
6701 flags |= RDP_OET_HIGH_WARNING;
6702 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
6703 flags |= RDP_OET_LOW_WARNING;
6704
6705 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
6706 desc->oed_info.function_flags = cpu_to_be32(flags);
6707 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6708 return sizeof(struct fc_rdp_oed_sfp_desc);
6709 }
6710
6711 static uint32_t
6712 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
6713 struct fc_rdp_oed_sfp_desc *desc,
6714 uint8_t *page_a2)
6715 {
6716 uint32_t flags = 0;
6717
6718 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6719
6720 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
6721 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
6722 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
6723 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
6724
6725 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6726 flags |= RDP_OET_HIGH_ALARM;
6727 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
6728 flags |= RDP_OET_LOW_ALARM;
6729 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
6730 flags |= RDP_OET_HIGH_WARNING;
6731 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
6732 flags |= RDP_OET_LOW_WARNING;
6733
6734 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
6735 desc->oed_info.function_flags = cpu_to_be32(flags);
6736 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6737 return sizeof(struct fc_rdp_oed_sfp_desc);
6738 }
6739
6740 static uint32_t
6741 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
6742 struct fc_rdp_oed_sfp_desc *desc,
6743 uint8_t *page_a2)
6744 {
6745 uint32_t flags = 0;
6746
6747 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6748
6749 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
6750 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
6751 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
6752 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
6753
6754 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6755 flags |= RDP_OET_HIGH_ALARM;
6756 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
6757 flags |= RDP_OET_LOW_ALARM;
6758 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
6759 flags |= RDP_OET_HIGH_WARNING;
6760 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
6761 flags |= RDP_OET_LOW_WARNING;
6762
6763 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
6764 desc->oed_info.function_flags = cpu_to_be32(flags);
6765 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6766 return sizeof(struct fc_rdp_oed_sfp_desc);
6767 }
6768
6769
6770 static uint32_t
6771 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
6772 struct fc_rdp_oed_sfp_desc *desc,
6773 uint8_t *page_a2)
6774 {
6775 uint32_t flags = 0;
6776
6777 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6778
6779 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
6780 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
6781 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
6782 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
6783
6784 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6785 flags |= RDP_OET_HIGH_ALARM;
6786 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
6787 flags |= RDP_OET_LOW_ALARM;
6788 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6789 flags |= RDP_OET_HIGH_WARNING;
6790 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
6791 flags |= RDP_OET_LOW_WARNING;
6792
6793 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
6794 desc->oed_info.function_flags = cpu_to_be32(flags);
6795 desc->length = cpu_to_be32(sizeof(desc->oed_info));
6796 return sizeof(struct fc_rdp_oed_sfp_desc);
6797 }
6798
6799 static uint32_t
6800 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
6801 uint8_t *page_a0, struct lpfc_vport *vport)
6802 {
6803 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
6804 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
6805 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
6806 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
6807 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
6808 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
6809 desc->length = cpu_to_be32(sizeof(desc->opd_info));
6810 return sizeof(struct fc_rdp_opd_sfp_desc);
6811 }
6812
6813 static uint32_t
6814 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
6815 {
6816 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
6817 return 0;
6818 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
6819
6820 desc->info.CorrectedBlocks =
6821 cpu_to_be32(stat->fecCorrBlkCount);
6822 desc->info.UncorrectableBlocks =
6823 cpu_to_be32(stat->fecUncorrBlkCount);
6824
6825 desc->length = cpu_to_be32(sizeof(desc->info));
6826
6827 return sizeof(struct fc_fec_rdp_desc);
6828 }
6829
6830 static uint32_t
6831 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
6832 {
6833 uint16_t rdp_cap = 0;
6834 uint16_t rdp_speed;
6835
6836 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
6837
6838 switch (phba->fc_linkspeed) {
6839 case LPFC_LINK_SPEED_1GHZ:
6840 rdp_speed = RDP_PS_1GB;
6841 break;
6842 case LPFC_LINK_SPEED_2GHZ:
6843 rdp_speed = RDP_PS_2GB;
6844 break;
6845 case LPFC_LINK_SPEED_4GHZ:
6846 rdp_speed = RDP_PS_4GB;
6847 break;
6848 case LPFC_LINK_SPEED_8GHZ:
6849 rdp_speed = RDP_PS_8GB;
6850 break;
6851 case LPFC_LINK_SPEED_10GHZ:
6852 rdp_speed = RDP_PS_10GB;
6853 break;
6854 case LPFC_LINK_SPEED_16GHZ:
6855 rdp_speed = RDP_PS_16GB;
6856 break;
6857 case LPFC_LINK_SPEED_32GHZ:
6858 rdp_speed = RDP_PS_32GB;
6859 break;
6860 case LPFC_LINK_SPEED_64GHZ:
6861 rdp_speed = RDP_PS_64GB;
6862 break;
6863 case LPFC_LINK_SPEED_128GHZ:
6864 rdp_speed = RDP_PS_128GB;
6865 break;
6866 case LPFC_LINK_SPEED_256GHZ:
6867 rdp_speed = RDP_PS_256GB;
6868 break;
6869 default:
6870 rdp_speed = RDP_PS_UNKNOWN;
6871 break;
6872 }
6873
6874 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
6875
6876 if (phba->lmt & LMT_256Gb)
6877 rdp_cap |= RDP_PS_256GB;
6878 if (phba->lmt & LMT_128Gb)
6879 rdp_cap |= RDP_PS_128GB;
6880 if (phba->lmt & LMT_64Gb)
6881 rdp_cap |= RDP_PS_64GB;
6882 if (phba->lmt & LMT_32Gb)
6883 rdp_cap |= RDP_PS_32GB;
6884 if (phba->lmt & LMT_16Gb)
6885 rdp_cap |= RDP_PS_16GB;
6886 if (phba->lmt & LMT_10Gb)
6887 rdp_cap |= RDP_PS_10GB;
6888 if (phba->lmt & LMT_8Gb)
6889 rdp_cap |= RDP_PS_8GB;
6890 if (phba->lmt & LMT_4Gb)
6891 rdp_cap |= RDP_PS_4GB;
6892 if (phba->lmt & LMT_2Gb)
6893 rdp_cap |= RDP_PS_2GB;
6894 if (phba->lmt & LMT_1Gb)
6895 rdp_cap |= RDP_PS_1GB;
6896
6897 if (rdp_cap == 0)
6898 rdp_cap = RDP_CAP_UNKNOWN;
6899 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
6900 rdp_cap |= RDP_CAP_USER_CONFIGURED;
6901
6902 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
6903 desc->length = cpu_to_be32(sizeof(desc->info));
6904 return sizeof(struct fc_rdp_port_speed_desc);
6905 }
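/*
 * Illustrative note (not part of the driver): the Port Speed descriptor
 * carries two 16-bit fields. "speed" holds the single RDP_PS_* code for
 * the current negotiated rate, while "capabilities" is the bitwise OR of
 * every RDP_PS_* code supported by the adapter (from phba->lmt), tagged
 * with RDP_CAP_USER_CONFIGURED when the link speed was forced by the user.
 * For a hypothetical 16G-capable HBA linked at 8G with a user-set speed:
 *
 *	speed        = RDP_PS_8GB;
 *	capabilities = RDP_PS_16GB | RDP_PS_8GB | RDP_PS_4GB | RDP_PS_2GB |
 *		       RDP_CAP_USER_CONFIGURED;
 */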
6906
6907 static uint32_t
6908 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
6909 struct lpfc_vport *vport)
6910 {
6911
6912 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
6913
6914 memcpy(desc->port_names.wwnn, &vport->fc_nodename,
6915 sizeof(desc->port_names.wwnn));
6916
6917 memcpy(desc->port_names.wwpn, &vport->fc_portname,
6918 sizeof(desc->port_names.wwpn));
6919
6920 desc->length = cpu_to_be32(sizeof(desc->port_names));
6921 return sizeof(struct fc_rdp_port_name_desc);
6922 }
6923
6924 static uint32_t
6925 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
6926 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6927 {
6928
6929 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
6930 if (vport->fc_flag & FC_FABRIC) {
6931 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
6932 sizeof(desc->port_names.wwnn));
6933
6934 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
6935 sizeof(desc->port_names.wwpn));
6936 } else { /* Point to Point */
6937 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
6938 sizeof(desc->port_names.wwnn));
6939
6940 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
6941 sizeof(desc->port_names.wwpn));
6942 }
6943
6944 desc->length = cpu_to_be32(sizeof(desc->port_names));
6945 return sizeof(struct fc_rdp_port_name_desc);
6946 }
6947
6948 static void
6949 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
6950 int status)
6951 {
6952 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
6953 struct lpfc_vport *vport = ndlp->vport;
6954 struct lpfc_iocbq *elsiocb;
6955 struct ulp_bde64 *bpl;
6956 IOCB_t *icmd;
6957 union lpfc_wqe128 *wqe;
6958 uint8_t *pcmd;
6959 struct ls_rjt *stat;
6960 struct fc_rdp_res_frame *rdp_res;
6961 uint32_t cmdsize, len;
6962 uint16_t *flag_ptr;
6963 int rc;
6964 u32 ulp_context;
6965
6966 if (status != SUCCESS)
6967 goto error;
6968
6969 /* This will change once we know the true size of the RDP payload */
6970 cmdsize = sizeof(struct fc_rdp_res_frame);
6971
6972 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
6973 lpfc_max_els_tries, rdp_context->ndlp,
6974 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
6975 if (!elsiocb)
6976 goto free_rdp_context;
6977
6978 ulp_context = get_job_ulpcontext(phba, elsiocb);
6979 if (phba->sli_rev == LPFC_SLI_REV4) {
6980 wqe = &elsiocb->wqe;
6981 /* ox-id of the frame */
6982 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
6983 rdp_context->ox_id);
6984 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6985 rdp_context->rx_id);
6986 } else {
6987 icmd = &elsiocb->iocb;
6988 icmd->ulpContext = rdp_context->rx_id;
6989 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
6990 }
6991
6992 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6993 "2171 Xmit RDP response tag x%x xri x%x, "
6994 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
6995 elsiocb->iotag, ulp_context,
6996 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6997 ndlp->nlp_rpi);
6998 rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
6999 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7000 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
7001 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7002
7003 /* Update Alarm and Warning */
7004 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
7005 phba->sfp_alarm |= *flag_ptr;
7006 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
7007 phba->sfp_warning |= *flag_ptr;
7008
7009 /* For RDP payload */
7010 len = 8;
7011 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
7012 (len + pcmd), ELS_CMD_RDP);
7013
7014 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
7015 rdp_context->page_a0, rdp_context->page_a2);
7016 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
7017 phba);
7018 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
7019 (len + pcmd), &rdp_context->link_stat);
7020 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
7021 (len + pcmd), vport);
7022 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
7023 (len + pcmd), vport, ndlp);
7024 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
7025 &rdp_context->link_stat);
7026 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
7027 &rdp_context->link_stat, vport);
7028 len += lpfc_rdp_res_oed_temp_desc(phba,
7029 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7030 rdp_context->page_a2);
7031 len += lpfc_rdp_res_oed_voltage_desc(phba,
7032 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7033 rdp_context->page_a2);
7034 len += lpfc_rdp_res_oed_txbias_desc(phba,
7035 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7036 rdp_context->page_a2);
7037 len += lpfc_rdp_res_oed_txpower_desc(phba,
7038 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7039 rdp_context->page_a2);
7040 len += lpfc_rdp_res_oed_rxpower_desc(phba,
7041 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
7042 rdp_context->page_a2);
7043 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
7044 rdp_context->page_a0, vport);
7045
7046 rdp_res->length = cpu_to_be32(len - 8);
7047 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7048
7049 /* Now that we know the true size of the payload, update the BPL */
7050 bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
7051 bpl->tus.f.bdeSize = len;
7052 bpl->tus.f.bdeFlags = 0;
7053 bpl->tus.w = le32_to_cpu(bpl->tus.w);
7054
7055 phba->fc_stat.elsXmitACC++;
7056 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7057 if (!elsiocb->ndlp) {
7058 lpfc_els_free_iocb(phba, elsiocb);
7059 goto free_rdp_context;
7060 }
7061
7062 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7063 if (rc == IOCB_ERROR) {
7064 lpfc_els_free_iocb(phba, elsiocb);
7065 lpfc_nlp_put(ndlp);
7066 }
7067
7068 goto free_rdp_context;
7069
7070 error:
7071 cmdsize = 2 * sizeof(uint32_t);
7072 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
7073 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
7074 if (!elsiocb)
7075 goto free_rdp_context;
7076
7077 if (phba->sli_rev == LPFC_SLI_REV4) {
7078 wqe = &elsiocb->wqe;
7079 /* ox-id of the frame */
7080 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7081 rdp_context->ox_id);
7082 bf_set(wqe_ctxt_tag,
7083 &wqe->xmit_els_rsp.wqe_com,
7084 rdp_context->rx_id);
7085 } else {
7086 icmd = &elsiocb->iocb;
7087 icmd->ulpContext = rdp_context->rx_id;
7088 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
7089 }
7090
7091 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7092
7093 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
7094 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7095 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7096
7097 phba->fc_stat.elsXmitLSRJT++;
7098 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7099 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7100 if (!elsiocb->ndlp) {
7101 lpfc_els_free_iocb(phba, elsiocb);
7102 goto free_rdp_context;
7103 }
7104
7105 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7106 if (rc == IOCB_ERROR) {
7107 lpfc_els_free_iocb(phba, elsiocb);
7108 lpfc_nlp_put(ndlp);
7109 }
7110
7111 free_rdp_context:
7112 /* This reference put is for the original unsolicited RDP. If the
7113 * prep failed, there is no reference to remove.
7114 */
7115 lpfc_nlp_put(ndlp);
7116 kfree(rdp_context);
7117 }
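/*
 * Illustrative note (not part of the driver): the RDP ACC above is an
 * 8-byte header (the ELS_CMD_ACC word plus a length word) followed by a
 * chain of self-describing descriptors, each builder returning the number
 * of bytes it appended:
 *
 *	len = 8;					// header
 *	len += lpfc_rdp_res_link_service(...);		// requested ELS
 *	len += lpfc_rdp_res_sfp_desc(...);		// SFP data
 *	// ...remaining descriptors...
 *	rdp_res->length = cpu_to_be32(len - 8);		// descriptor bytes only
 *	bpl->tus.f.bdeSize = len;			// true payload size
 */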
7118
7119 static int
7120 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
7121 {
7122 LPFC_MBOXQ_t *mbox = NULL;
7123 int rc;
7124
7125 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7126 if (!mbox) {
7127 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
7128 "7105 failed to allocate mailbox memory");
7129 return 1;
7130 }
7131
7132 if (lpfc_sli4_dump_page_a0(phba, mbox))
7133 goto rdp_fail;
7134 mbox->vport = rdp_context->ndlp->vport;
7135 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
7136 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
7137 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7138 if (rc == MBX_NOT_FINISHED) {
7139 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
7140 return 1;
7141 }
7142
7143 return 0;
7144
7145 rdp_fail:
7146 mempool_free(mbox, phba->mbox_mem_pool);
7147 return 1;
7148 }
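/*
 * Illustrative note (not part of the driver): RDP data collection is driven
 * by chained, non-blocking mailbox completions. The function above only
 * starts the chain; each completion handler issues the next mailbox and the
 * last one invokes rdp_context->cmpl (lpfc_els_rdp_cmpl). Handler names
 * other than lpfc_mbx_cmpl_rdp_page_a0 are assumptions:
 *
 *	DUMP page A0  --> lpfc_mbx_cmpl_rdp_page_a0()
 *	DUMP page A2  --> lpfc_mbx_cmpl_rdp_page_a2()
 *	READ_LNK_STAT --> lpfc_mbx_cmpl_rdp_link_stat()
 *	                      --> rdp_context->cmpl(phba, rdp_context, status)
 */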
7149
7150 /*
7151 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
7152 * @vport: pointer to a host virtual N_Port data structure.
7153 * @cmdiocb: pointer to lpfc command iocb data structure.
7154 * @ndlp: pointer to a node-list data structure.
7155 *
7156 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
7157 * IOCB. First, the payload of the unsolicited RDP is checked.
7158 * Then it will (1) send MBX_DUMP_MEMORY, embedded DMP_LMSD sub command TYPE-3
7159 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
7160 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
7161 * lpfc_els_rdp_cmpl to gather all the data and send the RDP response.
7162 *
7163 * Return code
7164 * 0 - Sent the acc response
7165 * 1 - Sent the reject response.
7166 */
7167 static int
7168 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7169 struct lpfc_nodelist *ndlp)
7170 {
7171 struct lpfc_hba *phba = vport->phba;
7172 struct lpfc_dmabuf *pcmd;
7173 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
7174 struct fc_rdp_req_frame *rdp_req;
7175 struct lpfc_rdp_context *rdp_context;
7176 union lpfc_wqe128 *cmd = NULL;
7177 struct ls_rjt stat;
7178
7179 if (phba->sli_rev < LPFC_SLI_REV4 ||
7180 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7181 LPFC_SLI_INTF_IF_TYPE_2) {
7182 rjt_err = LSRJT_UNABLE_TPC;
7183 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7184 goto error;
7185 }
7186
7187 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
7188 rjt_err = LSRJT_UNABLE_TPC;
7189 rjt_expl = LSEXP_REQ_UNSUPPORTED;
7190 goto error;
7191 }
7192
7193 pcmd = cmdiocb->cmd_dmabuf;
7194 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
7195
7196 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7197 "2422 ELS RDP Request "
7198 "dec len %d tag x%x port_id %d len %d\n",
7199 be32_to_cpu(rdp_req->rdp_des_length),
7200 be32_to_cpu(rdp_req->nport_id_desc.tag),
7201 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
7202 be32_to_cpu(rdp_req->nport_id_desc.length));
7203
7204 if (sizeof(struct fc_rdp_nport_desc) !=
7205 be32_to_cpu(rdp_req->rdp_des_length))
7206 goto rjt_logerr;
7207 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
7208 goto rjt_logerr;
7209 if (RDP_NPORT_ID_SIZE !=
7210 be32_to_cpu(rdp_req->nport_id_desc.length))
7211 goto rjt_logerr;
7212 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
7213 if (!rdp_context) {
7214 rjt_err = LSRJT_UNABLE_TPC;
7215 goto error;
7216 }
7217
7218 cmd = &cmdiocb->wqe;
7219 rdp_context->ndlp = lpfc_nlp_get(ndlp);
7220 if (!rdp_context->ndlp) {
7221 kfree(rdp_context);
7222 rjt_err = LSRJT_UNABLE_TPC;
7223 goto error;
7224 }
7225 rdp_context->ox_id = bf_get(wqe_rcvoxid,
7226 &cmd->xmit_els_rsp.wqe_com);
7227 rdp_context->rx_id = bf_get(wqe_ctxt_tag,
7228 &cmd->xmit_els_rsp.wqe_com);
7229 rdp_context->cmpl = lpfc_els_rdp_cmpl;
7230 if (lpfc_get_rdp_info(phba, rdp_context)) {
7231 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
7232 "2423 Unable to send mailbox");
7233 kfree(rdp_context);
7234 rjt_err = LSRJT_UNABLE_TPC;
7235 lpfc_nlp_put(ndlp);
7236 goto error;
7237 }
7238
7239 return 0;
7240
7241 rjt_logerr:
7242 rjt_err = LSRJT_LOGICAL_ERR;
7243
7244 error:
7245 memset(&stat, 0, sizeof(stat));
7246 stat.un.b.lsRjtRsnCode = rjt_err;
7247 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
7248 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7249 return 1;
7250 }
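/*
 * Illustrative sketch (not part of the driver; reject() is a hypothetical
 * stand-in for the LS_RJT path above): a valid RDP request must carry
 * exactly one N_Port ID descriptor, so any other shape is rejected as a
 * logical error:
 *
 *	if (be32_to_cpu(rdp_req->rdp_des_length) !=
 *	    sizeof(struct fc_rdp_nport_desc) ||
 *	    be32_to_cpu(rdp_req->nport_id_desc.tag) != RDP_N_PORT_DESC_TAG ||
 *	    be32_to_cpu(rdp_req->nport_id_desc.length) != RDP_NPORT_ID_SIZE)
 *		reject(LSRJT_LOGICAL_ERR);
 */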
7251
7252
7253 static void
7254 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7255 {
7256 MAILBOX_t *mb;
7257 IOCB_t *icmd;
7258 union lpfc_wqe128 *wqe;
7259 uint8_t *pcmd;
7260 struct lpfc_iocbq *elsiocb;
7261 struct lpfc_nodelist *ndlp;
7262 struct ls_rjt *stat;
7263 union lpfc_sli4_cfg_shdr *shdr;
7264 struct lpfc_lcb_context *lcb_context;
7265 struct fc_lcb_res_frame *lcb_res;
7266 uint32_t cmdsize, shdr_status, shdr_add_status;
7267 int rc;
7268
7269 mb = &pmb->u.mb;
7270 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
7271 ndlp = lcb_context->ndlp;
7272 pmb->ctx_ndlp = NULL;
7273 pmb->ctx_buf = NULL;
7274
7275 shdr = (union lpfc_sli4_cfg_shdr *)
7276 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
7277 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7278 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7279
7280 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
7281 "0194 SET_BEACON_CONFIG mailbox "
7282 "completed with status x%x add_status x%x,"
7283 " mbx status x%x\n",
7284 shdr_status, shdr_add_status, mb->mbxStatus);
7285
7286 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
7287 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
7288 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
7289 mempool_free(pmb, phba->mbox_mem_pool);
7290 goto error;
7291 }
7292
7293 mempool_free(pmb, phba->mbox_mem_pool);
7294 cmdsize = sizeof(struct fc_lcb_res_frame);
7295 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7296 lpfc_max_els_tries, ndlp,
7297 ndlp->nlp_DID, ELS_CMD_ACC);
7298
7299 /* Decrement the ndlp reference count from previous mbox command */
7300 lpfc_nlp_put(ndlp);
7301
7302 if (!elsiocb)
7303 goto free_lcb_context;
7304
7305 lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
7306
7307 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
7308
7309 if (phba->sli_rev == LPFC_SLI_REV4) {
7310 wqe = &elsiocb->wqe;
7311 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7312 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7313 lcb_context->ox_id);
7314 } else {
7315 icmd = &elsiocb->iocb;
7316 icmd->ulpContext = lcb_context->rx_id;
7317 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7318 }
7319
7320 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7321 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
7322 lcb_res->lcb_sub_command = lcb_context->sub_command;
7323 lcb_res->lcb_type = lcb_context->type;
7324 lcb_res->capability = lcb_context->capability;
7325 lcb_res->lcb_frequency = lcb_context->frequency;
7326 lcb_res->lcb_duration = lcb_context->duration;
7327 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7328 phba->fc_stat.elsXmitACC++;
7329
7330 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7331 if (!elsiocb->ndlp) {
7332 lpfc_els_free_iocb(phba, elsiocb);
7333 goto out;
7334 }
7335
7336 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7337 if (rc == IOCB_ERROR) {
7338 lpfc_els_free_iocb(phba, elsiocb);
7339 lpfc_nlp_put(ndlp);
7340 }
7341 out:
7342 kfree(lcb_context);
7343 return;
7344
7345 error:
7346 cmdsize = sizeof(struct fc_lcb_res_frame);
7347 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7348 lpfc_max_els_tries, ndlp,
7349 ndlp->nlp_DID, ELS_CMD_LS_RJT);
7350 lpfc_nlp_put(ndlp);
7351 if (!elsiocb)
7352 goto free_lcb_context;
7353
7354 if (phba->sli_rev == LPFC_SLI_REV4) {
7355 wqe = &elsiocb->wqe;
7356 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
7357 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7358 lcb_context->ox_id);
7359 } else {
7360 icmd = &elsiocb->iocb;
7361 icmd->ulpContext = lcb_context->rx_id;
7362 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
7363 }
7364
7365 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
7366
7367 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
7368 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
7369 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7370
7371 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
7372 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
7373
7374 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
7375 phba->fc_stat.elsXmitLSRJT++;
7376 elsiocb->ndlp = lpfc_nlp_get(ndlp);
7377 if (!elsiocb->ndlp) {
7378 lpfc_els_free_iocb(phba, elsiocb);
7379 goto free_lcb_context;
7380 }
7381
7382 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7383 if (rc == IOCB_ERROR) {
7384 lpfc_els_free_iocb(phba, elsiocb);
7385 lpfc_nlp_put(ndlp);
7386 }
7387 free_lcb_context:
7388 kfree(lcb_context);
7389 }
7390
7391 static int
7392 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
7393 struct lpfc_lcb_context *lcb_context,
7394 uint32_t beacon_state)
7395 {
7396 struct lpfc_hba *phba = vport->phba;
7397 union lpfc_sli4_cfg_shdr *cfg_shdr;
7398 LPFC_MBOXQ_t *mbox = NULL;
7399 uint32_t len;
7400 int rc;
7401
7402 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7403 if (!mbox)
7404 return 1;
7405
7406 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
7407 len = sizeof(struct lpfc_mbx_set_beacon_config) -
7408 sizeof(struct lpfc_sli4_cfg_mhdr);
7409 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7410 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
7411 LPFC_SLI4_MBX_EMBED);
7412 mbox->ctx_ndlp = (void *)lcb_context;
7413 mbox->vport = phba->pport;
7414 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
7415 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
7416 phba->sli4_hba.physical_port);
7417 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
7418 beacon_state);
7419 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
7420
7421 /*
7422 * Check bv1s bit before issuing the mailbox
7423 * if bv1s == 1, LCB V1 supported
7424 * else, LCB V0 supported
7425 */
7426
7427 if (phba->sli4_hba.pc_sli4_params.bv1s) {
7428 /* COMMON_SET_BEACON_CONFIG_V1 */
7429 cfg_shdr->request.word9 = BEACON_VERSION_V1;
7430 lcb_context->capability |= LCB_CAPABILITY_DURATION;
7431 bf_set(lpfc_mbx_set_beacon_port_type,
7432 &mbox->u.mqe.un.beacon_config, 0);
7433 bf_set(lpfc_mbx_set_beacon_duration_v1,
7434 &mbox->u.mqe.un.beacon_config,
7435 be16_to_cpu(lcb_context->duration));
7436 } else {
7437 /* COMMON_SET_BEACON_CONFIG_V0 */
7438 if (be16_to_cpu(lcb_context->duration) != 0) {
7439 mempool_free(mbox, phba->mbox_mem_pool);
7440 return 1;
7441 }
7442 cfg_shdr->request.word9 = BEACON_VERSION_V0;
7443 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
7444 bf_set(lpfc_mbx_set_beacon_state,
7445 &mbox->u.mqe.un.beacon_config, beacon_state);
7446 bf_set(lpfc_mbx_set_beacon_port_type,
7447 &mbox->u.mqe.un.beacon_config, 1);
7448 bf_set(lpfc_mbx_set_beacon_duration,
7449 &mbox->u.mqe.un.beacon_config,
7450 be16_to_cpu(lcb_context->duration));
7451 }
7452
7453 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7454 if (rc == MBX_NOT_FINISHED) {
7455 mempool_free(mbox, phba->mbox_mem_pool);
7456 return 1;
7457 }
7458
7459 return 0;
7460 }
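/*
 * Illustrative note (not part of the driver): the beacon mailbox comes in
 * two flavors and the function above picks one based on the bv1s SLI4
 * parameter. A rough decision sketch:
 *
 *	if (phba->sli4_hba.pc_sli4_params.bv1s) {
 *		// COMMON_SET_BEACON_CONFIG_V1: duration is supported
 *		cfg_shdr->request.word9 = BEACON_VERSION_V1;
 *		lcb_context->capability |= LCB_CAPABILITY_DURATION;
 *	} else {
 *		// COMMON_SET_BEACON_CONFIG_V0: a non-zero duration is an error
 *		if (be16_to_cpu(lcb_context->duration) != 0)
 *			return 1;
 *		cfg_shdr->request.word9 = BEACON_VERSION_V0;
 *	}
 */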
7461
7462
7463 /**
7464 * lpfc_els_rcv_lcb - Process an unsolicited LCB
7465 * @vport: pointer to a host virtual N_Port data structure.
7466 * @cmdiocb: pointer to lpfc command iocb data structure.
7467 * @ndlp: pointer to a node-list data structure.
7468 *
7469 * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
7470 * First, the payload of the unsolicited LCB is checked.
7471 * Then, based on the subcommand, the beacon is either turned on or off.
7472 *
7473 * Return code
7474 * 0 - Sent the acc response
7475 * 1 - Sent the reject response.
7476 **/
7477 static int
7478 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7479 struct lpfc_nodelist *ndlp)
7480 {
7481 struct lpfc_hba *phba = vport->phba;
7482 struct lpfc_dmabuf *pcmd;
7483 uint8_t *lp;
7484 struct fc_lcb_request_frame *beacon;
7485 struct lpfc_lcb_context *lcb_context;
7486 u8 state, rjt_err = 0;
7487 struct ls_rjt stat;
7488
7489 pcmd = cmdiocb->cmd_dmabuf;
7490 lp = (uint8_t *)pcmd->virt;
7491 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
7492
7493 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7494 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
7495 "type x%x frequency %x duration x%x\n",
7496 lp[0], lp[1], lp[2],
7497 beacon->lcb_command,
7498 beacon->lcb_sub_command,
7499 beacon->lcb_type,
7500 beacon->lcb_frequency,
7501 be16_to_cpu(beacon->lcb_duration));
7502
7503 if (beacon->lcb_sub_command != LPFC_LCB_ON &&
7504 beacon->lcb_sub_command != LPFC_LCB_OFF) {
7505 rjt_err = LSRJT_CMD_UNSUPPORTED;
7506 goto rjt;
7507 }
7508
7509 if (phba->sli_rev < LPFC_SLI_REV4 ||
7510 phba->hba_flag & HBA_FCOE_MODE ||
7511 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7512 LPFC_SLI_INTF_IF_TYPE_2)) {
7513 rjt_err = LSRJT_CMD_UNSUPPORTED;
7514 goto rjt;
7515 }
7516
7517 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
7518 if (!lcb_context) {
7519 rjt_err = LSRJT_UNABLE_TPC;
7520 goto rjt;
7521 }
7522
7523 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
7524 lcb_context->sub_command = beacon->lcb_sub_command;
7525 lcb_context->capability = 0;
7526 lcb_context->type = beacon->lcb_type;
7527 lcb_context->frequency = beacon->lcb_frequency;
7528 lcb_context->duration = beacon->lcb_duration;
7529 lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
7530 lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
7531 lcb_context->ndlp = lpfc_nlp_get(ndlp);
7532 if (!lcb_context->ndlp) {
7533 rjt_err = LSRJT_UNABLE_TPC;
7534 goto rjt_free;
7535 }
7536
7537 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
7538 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
7539 "0193 failed to send mail box");
7540 lpfc_nlp_put(ndlp);
7541 rjt_err = LSRJT_UNABLE_TPC;
7542 goto rjt_free;
7543 }
7544 return 0;
7545
7546 rjt_free:
7547 kfree(lcb_context);
7548 rjt:
7549 memset(&stat, 0, sizeof(stat));
7550 stat.un.b.lsRjtRsnCode = rjt_err;
7551 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7552 return 1;
7553 }
7554
7555
7556 /**
7557 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
7558 * @vport: pointer to a host virtual N_Port data structure.
7559 *
7560 * This routine cleans up any Registration State Change Notification
7561 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
7562 * @vport together with the host_lock is used to prevent multiple threads
7563 * from accessing the RSCN array on the same @vport at the same time.
7564 **/
7565 void
7566 lpfc_els_flush_rscn(struct lpfc_vport *vport)
7567 {
7568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7569 struct lpfc_hba *phba = vport->phba;
7570 int i;
7571
7572 spin_lock_irq(shost->host_lock);
7573 if (vport->fc_rscn_flush) {
7574 /* Another thread is walking fc_rscn_id_list on this vport */
7575 spin_unlock_irq(shost->host_lock);
7576 return;
7577 }
7578 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
7579 vport->fc_rscn_flush = 1;
7580 spin_unlock_irq(shost->host_lock);
7581
7582 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7583 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
7584 vport->fc_rscn_id_list[i] = NULL;
7585 }
7586 spin_lock_irq(shost->host_lock);
7587 vport->fc_rscn_id_cnt = 0;
7588 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
7589 spin_unlock_irq(shost->host_lock);
7590 lpfc_can_disctmo(vport);
7591 /* Indicate we are done walking this fc_rscn_id_list */
7592 vport->fc_rscn_flush = 0;
7593 }
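/*
 * Illustrative sketch (not part of the driver): fc_rscn_flush acts as a
 * walker token for fc_rscn_id_list. It is tested and set under host_lock,
 * but the list walk itself runs unlocked; any other path that finds the
 * token taken simply backs off rather than blocking:
 *
 *	spin_lock_irq(shost->host_lock);
 *	if (vport->fc_rscn_flush) {		// someone else is walking
 *		spin_unlock_irq(shost->host_lock);
 *		return;
 *	}
 *	vport->fc_rscn_flush = 1;		// take the token
 *	spin_unlock_irq(shost->host_lock);
 *	// ...walk or modify vport->fc_rscn_id_list...
 *	vport->fc_rscn_flush = 0;		// release the token
 */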
7594
7595 /**
7596 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
7597 * @vport: pointer to a host virtual N_Port data structure.
7598 * @did: remote destination port identifier.
7599 *
7600 * This routine checks whether there is any pending Registration State
7601 * Change Notification (RSCN) to a @did on @vport.
7602 *
7603 * Return code
7604 * Non-zero - The @did matched a pending rscn
7605 * 0 - not able to match @did with a pending rscn
7606 **/
7607 int
7608 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
7609 {
7610 D_ID ns_did;
7611 D_ID rscn_did;
7612 uint32_t *lp;
7613 uint32_t payload_len, i;
7614 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7615
7616 ns_did.un.word = did;
7617
7618 /* Never match fabric nodes for RSCNs */
7619 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7620 return 0;
7621
7622 /* If we are doing a FULL RSCN rediscovery, match everything */
7623 if (vport->fc_flag & FC_RSCN_DISCOVERY)
7624 return did;
7625
7626 spin_lock_irq(shost->host_lock);
7627 if (vport->fc_rscn_flush) {
7628 /* Another thread is walking fc_rscn_id_list on this vport */
7629 spin_unlock_irq(shost->host_lock);
7630 return 0;
7631 }
7632 /* Indicate we are walking fc_rscn_id_list on this vport */
7633 vport->fc_rscn_flush = 1;
7634 spin_unlock_irq(shost->host_lock);
7635 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
7636 lp = vport->fc_rscn_id_list[i]->virt;
7637 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
7638 payload_len -= sizeof(uint32_t); /* take off word 0 */
7639 while (payload_len) {
7640 rscn_did.un.word = be32_to_cpu(*lp++);
7641 payload_len -= sizeof(uint32_t);
7642 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
7643 case RSCN_ADDRESS_FORMAT_PORT:
7644 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7645 && (ns_did.un.b.area == rscn_did.un.b.area)
7646 && (ns_did.un.b.id == rscn_did.un.b.id))
7647 goto return_did_out;
7648 break;
7649 case RSCN_ADDRESS_FORMAT_AREA:
7650 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
7651 && (ns_did.un.b.area == rscn_did.un.b.area))
7652 goto return_did_out;
7653 break;
7654 case RSCN_ADDRESS_FORMAT_DOMAIN:
7655 if (ns_did.un.b.domain == rscn_did.un.b.domain)
7656 goto return_did_out;
7657 break;
7658 case RSCN_ADDRESS_FORMAT_FABRIC:
7659 goto return_did_out;
7660 }
7661 }
7662 }
7663 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7664 vport->fc_rscn_flush = 0;
7665 return 0;
7666 return_did_out:
7667 /* Indicate we are done with walking fc_rscn_id_list on this vport */
7668 vport->fc_rscn_flush = 0;
7669 return did;
7670 }
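/*
 * Illustrative examples (not part of the driver) of the address-format
 * matching above. Each RSCN entry carries a 24-bit affected address and a
 * format field (in the resv byte) that selects the match granularity:
 *
 *	PORT   - domain, area and port ID must all match the DID
 *	AREA   - e.g. domain 0x23, area 0x28 matches any DID 0x2328xx
 *	DOMAIN - e.g. domain 0x23 matches any DID 0x23xxxx
 *	FABRIC - matches any DID (fabric DIDs were already filtered out)
 */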
7671
7672 /**
7673 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
7674 * @vport: pointer to a host virtual N_Port data structure.
7675 *
7676 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
7677 * state machine for each of a @vport's nodes that matches a pending RSCN
7678 * (Registration State Change Notification).
7679 *
7680 * Return code
7681 * 0 - Successful (currently always returns 0)
7682 **/
7683 static int
7684 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
7685 {
7686 struct lpfc_nodelist *ndlp = NULL, *n;
7687
7688 /* Move all affected nodes by pending RSCNs to NPR state. */
7689 list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
7690 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
7691 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
7692 continue;
7693
7694 /* NVME Target mode does not do RSCN Recovery. */
7695 if (vport->phba->nvmet_support)
7696 continue;
7697
7698 /* If we are in the process of doing discovery on this
7699 * NPort, let it continue on its own.
7700 */
7701 switch (ndlp->nlp_state) {
7702 case NLP_STE_PLOGI_ISSUE:
7703 case NLP_STE_ADISC_ISSUE:
7704 case NLP_STE_REG_LOGIN_ISSUE:
7705 case NLP_STE_PRLI_ISSUE:
7706 case NLP_STE_LOGO_ISSUE:
7707 continue;
7708 }
7709
7710 lpfc_disc_state_machine(vport, ndlp, NULL,
7711 NLP_EVT_DEVICE_RECOVERY);
7712 lpfc_cancel_retry_delay_tmo(vport, ndlp);
7713 }
7714 return 0;
7715 }
7716
7717 /**
7718 * lpfc_send_rscn_event - Send an RSCN event to management application
7719 * @vport: pointer to a host virtual N_Port data structure.
7720 * @cmdiocb: pointer to lpfc command iocb data structure.
7721 *
7722 * lpfc_send_rscn_event sends an RSCN netlink event to management
7723 * applications.
7724 */
7725 static void
7726 lpfc_send_rscn_event(struct lpfc_vport *vport,
7727 struct lpfc_iocbq *cmdiocb)
7728 {
7729 struct lpfc_dmabuf *pcmd;
7730 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7731 uint32_t *payload_ptr;
7732 uint32_t payload_len;
7733 struct lpfc_rscn_event_header *rscn_event_data;
7734
7735 pcmd = cmdiocb->cmd_dmabuf;
7736 payload_ptr = (uint32_t *) pcmd->virt;
7737 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
7738
7739 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
7740 payload_len, GFP_KERNEL);
7741 if (!rscn_event_data) {
7742 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
7743 "0147 Failed to allocate memory for RSCN event\n");
7744 return;
7745 }
7746 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
7747 rscn_event_data->payload_length = payload_len;
7748 memcpy(rscn_event_data->rscn_payload, payload_ptr,
7749 payload_len);
7750
7751 fc_host_post_vendor_event(shost,
7752 fc_get_event_number(),
7753 sizeof(struct lpfc_rscn_event_header) + payload_len,
7754 (char *)rscn_event_data,
7755 LPFC_NL_VENDOR_ID);
7756
7757 kfree(rscn_event_data);
7758 }
7759
7760 /**
7761 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
7762 * @vport: pointer to a host virtual N_Port data structure.
7763 * @cmdiocb: pointer to lpfc command iocb data structure.
7764 * @ndlp: pointer to a node-list data structure.
7765 *
7766 * This routine processes an unsolicited RSCN (Registration State Change
7767 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
7768 * and the fc_host_post_event() routine is invoked to notify the FC
7769 * transport layer. If the discovery state machine is about to begin
7770 * discovery, it just accepts the RSCN and the discovery process will
7771 * satisfy it. If this RSCN only contains N_Port IDs for other vports on
7772 * this HBA, it just accepts the RSCN and ignores it. If the state machine
7773 * is in the recovery state, the fc_rscn_id_list of this @vport is walked
7774 * and the lpfc_rscn_recovery_check() routine is invoked to send recovery
7775 * events for all nodes that match the RSCN payload. Otherwise, the
7776 * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
7777 *
7778 * Return code
7779 * 0 - Just sent the acc response
7780 * 1 - Sent the acc response and waited for name server completion
7781 **/
7782 static int
7783 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7784 struct lpfc_nodelist *ndlp)
7785 {
7786 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7787 struct lpfc_hba *phba = vport->phba;
7788 struct lpfc_dmabuf *pcmd;
7789 uint32_t *lp, *datap;
7790 uint32_t payload_len, length, nportid, *cmd;
7791 int rscn_cnt;
7792 int rscn_id = 0, hba_id = 0;
7793 int i, tmo;
7794
7795 pcmd = cmdiocb->cmd_dmabuf;
7796 lp = (uint32_t *) pcmd->virt;
7797
7798 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
7799 payload_len -= sizeof(uint32_t); /* take off word 0 */
7800 /* RSCN received */
7801 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7802 "0214 RSCN received Data: x%x x%x x%x x%x\n",
7803 vport->fc_flag, payload_len, *lp,
7804 vport->fc_rscn_id_cnt);
7805
7806 /* Send an RSCN event to the management application */
7807 lpfc_send_rscn_event(vport, cmdiocb);
7808
7809 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
7810 fc_host_post_event(shost, fc_get_event_number(),
7811 FCH_EVT_RSCN, lp[i]);
7812
7813 /* Check if RSCN is coming from a direct-connected remote NPort */
7814 if (vport->fc_flag & FC_PT2PT) {
7815 /* If so, just ACC it, no other action needed for now */
7816 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7817 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
7818 *lp, vport->fc_flag, payload_len);
7819 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7820
7821 /* Check to see if we need to NVME rescan this target
7822 * remoteport.
7823 */
7824 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
7825 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
7826 lpfc_nvme_rescan_port(vport, ndlp);
7827 return 0;
7828 }
7829
7830 /* If we are about to begin discovery, just ACC the RSCN.
7831 * Discovery processing will satisfy it.
7832 */
7833 if (vport->port_state <= LPFC_NS_QRY) {
7834 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7835 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
7836 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
7837
7838 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7839 return 0;
7840 }
7841
7842 /* If this RSCN just contains NPortIDs for other vports on this HBA,
7843 * just ACC and ignore it.
7844 */
7845 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
7846 !(vport->cfg_peer_port_login)) {
7847 i = payload_len;
7848 datap = lp;
7849 while (i > 0) {
7850 nportid = *datap++;
7851 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
7852 i -= sizeof(uint32_t);
7853 rscn_id++;
7854 if (lpfc_find_vport_by_did(phba, nportid))
7855 hba_id++;
7856 }
7857 if (rscn_id == hba_id) {
7858 /* ALL NPortIDs in RSCN are on HBA */
7859 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7860 "0219 Ignore RSCN "
7861 "Data: x%x x%x x%x x%x\n",
7862 vport->fc_flag, payload_len,
7863 *lp, vport->fc_rscn_id_cnt);
7864 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7865 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
7866 ndlp->nlp_DID, vport->port_state,
7867 ndlp->nlp_flag);
7868
7869 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
7870 ndlp, NULL);
7871 /* Restart disctmo if it's already running */
7872 if (vport->fc_flag & FC_DISC_TMO) {
7873 tmo = ((phba->fc_ratov * 3) + 3);
7874 mod_timer(&vport->fc_disctmo,
7875 jiffies +
7876 msecs_to_jiffies(1000 * tmo));
7877 }
7878 return 0;
7879 }
7880 }
7881
7882 spin_lock_irq(shost->host_lock);
7883 if (vport->fc_rscn_flush) {
7884 /* Another thread is walking fc_rscn_id_list on this vport */
7885 vport->fc_flag |= FC_RSCN_DISCOVERY;
7886 spin_unlock_irq(shost->host_lock);
7887 /* Send back ACC */
7888 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7889 return 0;
7890 }
7891 /* Indicate we are walking fc_rscn_id_list on this vport */
7892 vport->fc_rscn_flush = 1;
7893 spin_unlock_irq(shost->host_lock);
7894 /* Get the array count after successfully have the token */
7895 rscn_cnt = vport->fc_rscn_id_cnt;
7896 /* If we are already processing an RSCN, save the received
7897 * RSCN payload buffer, cmdiocb->cmd_dmabuf, to process later.
7898 */
7899 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
7900 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7901 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
7902 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
7903
7904 spin_lock_irq(shost->host_lock);
7905 vport->fc_flag |= FC_RSCN_DEFERRED;
7906
7907 /* Restart disctmo if it's already running */
7908 if (vport->fc_flag & FC_DISC_TMO) {
7909 tmo = ((phba->fc_ratov * 3) + 3);
7910 mod_timer(&vport->fc_disctmo,
7911 jiffies + msecs_to_jiffies(1000 * tmo));
7912 }
7913 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
7914 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
7915 vport->fc_flag |= FC_RSCN_MODE;
7916 spin_unlock_irq(shost->host_lock);
7917 if (rscn_cnt) {
7918 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
7919 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
7920 }
7921 if ((rscn_cnt) &&
7922 (payload_len + length <= LPFC_BPL_SIZE)) {
7923 *cmd &= ELS_CMD_MASK;
7924 *cmd |= cpu_to_be32(payload_len + length);
7925 memcpy(((uint8_t *)cmd) + length, lp,
7926 payload_len);
7927 } else {
7928 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
7929 vport->fc_rscn_id_cnt++;
7930 /* If we zero cmdiocb->cmd_dmabuf, the calling
7931 * routine will not try to free it.
7932 */
7933 cmdiocb->cmd_dmabuf = NULL;
7934 }
7935 /* Deferred RSCN */
7936 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7937 "0235 Deferred RSCN "
7938 "Data: x%x x%x x%x\n",
7939 vport->fc_rscn_id_cnt, vport->fc_flag,
7940 vport->port_state);
7941 } else {
7942 vport->fc_flag |= FC_RSCN_DISCOVERY;
7943 spin_unlock_irq(shost->host_lock);
7944 /* ReDiscovery RSCN */
7945 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7946 "0234 ReDiscovery RSCN "
7947 "Data: x%x x%x x%x\n",
7948 vport->fc_rscn_id_cnt, vport->fc_flag,
7949 vport->port_state);
7950 }
7951 /* Indicate we are done walking fc_rscn_id_list on this vport */
7952 vport->fc_rscn_flush = 0;
7953 /* Send back ACC */
7954 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7955 /* send RECOVERY event for ALL nodes that match RSCN payload */
7956 lpfc_rscn_recovery_check(vport);
7957 return 0;
7958 }
7959 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7960 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
7961 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
7962
7963 spin_lock_irq(shost->host_lock);
7964 vport->fc_flag |= FC_RSCN_MODE;
7965 spin_unlock_irq(shost->host_lock);
7966 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7967 /* Indicate we are done walking fc_rscn_id_list on this vport */
7968 vport->fc_rscn_flush = 0;
7969 /*
7970 * If we zero cmdiocb->cmd_dmabuf, the calling routine will
7971 * not try to free it.
7972 */
7973 cmdiocb->cmd_dmabuf = NULL;
7974 lpfc_set_disctmo(vport);
7975 /* Send back ACC */
7976 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7977 /* send RECOVERY event for ALL nodes that match RSCN payload */
7978 lpfc_rscn_recovery_check(vport);
7979 return lpfc_els_handle_rscn(vport);
7980 }
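/*
 * Illustrative sketch (not part of the driver) of the deferral logic above:
 * while FC_RSCN_MODE is active, a newly received RSCN is either folded into
 * the last saved payload (when the combined length still fits in a buffer)
 * or stored as a new entry, up to FC_MAX_HOLD_RSCN buffers:
 *
 *	if (rscn_cnt && payload_len + length <= LPFC_BPL_SIZE) {
 *		// append the new words and grow the stored payload length
 *		*cmd &= ELS_CMD_MASK;
 *		*cmd |= cpu_to_be32(payload_len + length);
 *		memcpy((uint8_t *)cmd + length, lp, payload_len);
 *	} else {
 *		vport->fc_rscn_id_list[rscn_cnt] = pcmd;  // keep whole buffer
 *		vport->fc_rscn_id_cnt++;
 *		cmdiocb->cmd_dmabuf = NULL;	// caller must not free it
 *	}
 */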
7981
7982 /**
7983 * lpfc_els_handle_rscn - Handle rscn for a vport
7984 * @vport: pointer to a host virtual N_Port data structure.
7985 *
7986 * This routine handles the Registration State Change Notification (RSCN)
7987 * for a @vport. If login to NameServer does not exist, a new ndlp shall
7988 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
7989 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
7990 * NameServer shall be issued. If CT command to the NameServer fails to be
7991 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
7992 * RSCN activities with the @vport.
7993 *
7994 * Return code
7995 * 0 - Cleaned up rscn on the @vport
7996 * 1 - Wait for plogi to name server before proceed
7997 **/
7998 int
7999 lpfc_els_handle_rscn(struct lpfc_vport *vport)
8000 {
8001 struct lpfc_nodelist *ndlp;
8002 struct lpfc_hba *phba = vport->phba;
8003
8004 /* Ignore RSCN if the port is being torn down. */
8005 if (vport->load_flag & FC_UNLOADING) {
8006 lpfc_els_flush_rscn(vport);
8007 return 0;
8008 }
8009
8010 /* Start timer for RSCN processing */
8011 lpfc_set_disctmo(vport);
8012
8013 /* RSCN processed */
8014 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
8015 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
8016 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
8017 vport->port_state, vport->num_disc_nodes,
8018 vport->gidft_inp);
8019
8020 /* To process RSCN, first compare RSCN data with NameServer */
8021 vport->fc_ns_retry = 0;
8022 vport->num_disc_nodes = 0;
8023
8024 ndlp = lpfc_findnode_did(vport, NameServer_DID);
8025 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
8026 /* Good ndlp, issue CT Request to NameServer. Need to
8027 * know how many gidfts were issued. If none, then just
8028 * flush the RSCN. Otherwise, the outstanding requests
8029 * need to complete.
8030 */
8031 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
8032 if (lpfc_issue_gidft(vport) > 0)
8033 return 1;
8034 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
8035 if (lpfc_issue_gidpt(vport) > 0)
8036 return 1;
8037 } else {
8038 return 1;
8039 }
8040 } else {
8041 /* Nameserver login in question. Revalidate. */
8042 if (ndlp) {
8043 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
8044 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8045 } else {
8046 ndlp = lpfc_nlp_init(vport, NameServer_DID);
8047 if (!ndlp) {
8048 lpfc_els_flush_rscn(vport);
8049 return 0;
8050 }
8051 ndlp->nlp_prev_state = ndlp->nlp_state;
8052 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8053 }
8054 ndlp->nlp_type |= NLP_FABRIC;
8055 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
8056 /* Wait for NameServer login cmpl before we can
8057 * continue
8058 */
8059 return 1;
8060 }
8061
8062 lpfc_els_flush_rscn(vport);
8063 return 0;
8064 }
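/*
 * Illustrative note (not part of the driver): once an RSCN is accepted, the
 * NameServer is re-queried according to cfg_ns_query, and the RSCN state is
 * flushed only when no request could be started:
 *
 *	LPFC_NS_QUERY_GID_FT -> lpfc_issue_gidft(vport)   // per FC4 type
 *	LPFC_NS_QUERY_GID_PT -> lpfc_issue_gidpt(vport)   // all N_Ports
 *	no NameServer login  -> lpfc_issue_els_plogi(vport, NameServer_DID, 0)
 */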
8065
8066 /**
8067 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
8068 * @vport: pointer to a host virtual N_Port data structure.
8069 * @cmdiocb: pointer to lpfc command iocb data structure.
8070 * @ndlp: pointer to a node-list data structure.
8071 *
8072 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
8073 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
8074 * point topology. As an unsolicited FLOGI should not be received in a loop
8075 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
8076 * lpfc_check_sparm() routine is invoked to check the parameters in the
8077 * unsolicited FLOGI. If parameters validation failed, the routine
8078 * lpfc_els_rsp_reject() shall be called with reject reason code set to
8079 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
8080 * FLOGI shall be compared with the Port WWN of the @vport to determine who
8081 * will initiate PLOGI. The higher lexicographical value party shall has
8082 * higher priority (as the winning port) and will initiate PLOGI and
8083 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
8084 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
8085 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
8086 *
8087 * Return code
8088 * 0 - Successfully processed the unsolicited flogi
8089 * 1 - Failed to process the unsolicited flogi
8090 **/
8091 static int
8092 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8093 struct lpfc_nodelist *ndlp)
8094 {
8095 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8096 struct lpfc_hba *phba = vport->phba;
8097 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
8098 uint32_t *lp = (uint32_t *) pcmd->virt;
8099 union lpfc_wqe128 *wqe = &cmdiocb->wqe;
8100 struct serv_parm *sp;
8101 LPFC_MBOXQ_t *mbox;
8102 uint32_t cmd, did;
8103 int rc;
8104 uint32_t fc_flag = 0;
8105 uint32_t port_state = 0;
8106
8107 /* Clear external loopback plug detected flag */
8108 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
8109
8110 cmd = *lp++;
8111 sp = (struct serv_parm *) lp;
8112
8113 /* FLOGI received */
8114
8115 lpfc_set_disctmo(vport);
8116
8117 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8118 /* We should never receive a FLOGI in loop mode, ignore it */
8119 did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
8120
8121 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
8122 Loop Mode */
8123 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8124 "0113 An FLOGI ELS command x%x was "
8125 "received from DID x%x in Loop Mode\n",
8126 cmd, did);
8127 return 1;
8128 }
8129
8130 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
8131
8132 /*
8133 * If our portname is greater than the remote portname,
8134 * then we initiate Nport login.
8135 */
8136
8137 rc = memcmp(&vport->fc_portname, &sp->portName,
8138 sizeof(struct lpfc_name));
8139
8140 if (!rc) {
8141 if (phba->sli_rev < LPFC_SLI_REV4) {
8142 mbox = mempool_alloc(phba->mbox_mem_pool,
8143 GFP_KERNEL);
8144 if (!mbox)
8145 return 1;
8146 lpfc_linkdown(phba);
8147 lpfc_init_link(phba, mbox,
8148 phba->cfg_topology,
8149 phba->cfg_link_speed);
8150 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8151 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8152 mbox->vport = vport;
8153 rc = lpfc_sli_issue_mbox(phba, mbox,
8154 MBX_NOWAIT);
8155 lpfc_set_loopback_flag(phba);
8156 if (rc == MBX_NOT_FINISHED)
8157 mempool_free(mbox, phba->mbox_mem_pool);
8158 return 1;
8159 }
8160
8161 /* External loopback plug insertion detected */
8162 phba->link_flag |= LS_EXTERNAL_LOOPBACK;
8163
8164 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
8165 "1119 External Loopback plug detected\n");
8166
8167 /* abort the flogi coming back to ourselves
8168 * due to external loopback on the port.
8169 */
8170 lpfc_els_abort_flogi(phba);
8171 return 0;
8172
8173 } else if (rc > 0) { /* greater than */
8174 spin_lock_irq(shost->host_lock);
8175 vport->fc_flag |= FC_PT2PT_PLOGI;
8176 spin_unlock_irq(shost->host_lock);
8177
8178 /* If we have the high WWPN we can assign our own
8179 * myDID; otherwise, we have to WAIT for a PLOGI
8180 * from the remote NPort to find out what it
8181 * will be.
8182 */
8183 vport->fc_myDID = PT2PT_LocalID;
8184 } else {
8185 vport->fc_myDID = PT2PT_RemoteID;
8186 }
8187
8188 /*
8189 * The vport state should go to LPFC_FLOGI only
8190 * AFTER we issue a FLOGI, not receive one.
8191 */
8192 spin_lock_irq(shost->host_lock);
8193 fc_flag = vport->fc_flag;
8194 port_state = vport->port_state;
8195 vport->fc_flag |= FC_PT2PT;
8196 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8197
8198 /* Acking an unsol FLOGI. Count 1 for link bounce
8199 * work-around.
8200 */
8201 vport->rcv_flogi_cnt++;
8202 spin_unlock_irq(shost->host_lock);
8203 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8204 "3311 Rcv Flogi PS x%x new PS x%x "
8205 "fc_flag x%x new fc_flag x%x\n",
8206 port_state, vport->port_state,
8207 fc_flag, vport->fc_flag);
8208
8209 /*
8210 * We temporarily set fc_myDID to make it look like we are
8211 * a Fabric. This is done just so we end up with the right
8212 * did / sid on the FLOGI ACC rsp.
8213 */
8214 did = vport->fc_myDID;
8215 vport->fc_myDID = Fabric_DID;
8216
8217 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
8218
8219 /* Defer ACC response until AFTER we issue a FLOGI */
8220 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
8221 phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
8222 &wqe->xmit_els_rsp.wqe_com);
8223 phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
8224 &wqe->xmit_els_rsp.wqe_com);
8225
8226 vport->fc_myDID = did;
8227
8228 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8229 "3344 Deferring FLOGI ACC: rx_id: x%x,"
8230 " ox_id: x%x, hba_flag x%x\n",
8231 phba->defer_flogi_acc_rx_id,
8232 phba->defer_flogi_acc_ox_id, phba->hba_flag);
8233
8234 phba->defer_flogi_acc_flag = true;
8235
8236 return 0;
8237 }
8238
8239 /* Send back ACC */
8240 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
8241
8242 /* Now lets put fc_myDID back to what its supposed to be */
8243 vport->fc_myDID = did;
8244
8245 return 0;
8246 }
8247
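/*
 * Illustrative sketch (not compiled into the driver): how the unsolicited
 * FLOGI handler above decides the point-to-point roles. memcmp() over the
 * 8-byte big-endian Port Names gives the lexicographic ordering; the winner
 * claims PT2PT_LocalID and sends PLOGI, the loser takes PT2PT_RemoteID and
 * waits for the remote PLOGI. The helper name and u8[8] parameters are
 * hypothetical; the driver compares vport->fc_portname against sp->portName
 * directly, and the equal case is the external-loopback path above.
 */
#if 0
static u32 pt2pt_pick_my_did(const u8 my_wwpn[8], const u8 remote_wwpn[8])
{
	if (memcmp(my_wwpn, remote_wwpn, 8) > 0)
		return PT2PT_LocalID;	/* higher WWPN: we will send PLOGI */
	return PT2PT_RemoteID;		/* lower WWPN: wait for remote PLOGI */
}
#endif
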
8248 /**
8249 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
8250 * @vport: pointer to a host virtual N_Port data structure.
8251 * @cmdiocb: pointer to lpfc command iocb data structure.
8252 * @ndlp: pointer to a node-list data structure.
8253 *
8254 * This routine processes Request Node Identification Data (RNID) IOCB
8255  * received as an ELS unsolicited event. Only when the RNID specifies format
8256  * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
8257  * this routine invoke the lpfc_els_rsp_rnid_acc() routine to
8258 * Accept (ACC) the RNID ELS command. All the other RNID formats are
8259 * rejected by invoking the lpfc_els_rsp_reject() routine.
8260 *
8261 * Return code
8262 * 0 - Successfully processed rnid iocb (currently always return 0)
8263 **/
8264 static int
8265 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8266 struct lpfc_nodelist *ndlp)
8267 {
8268 struct lpfc_dmabuf *pcmd;
8269 uint32_t *lp;
8270 RNID *rn;
8271 struct ls_rjt stat;
8272
8273 pcmd = cmdiocb->cmd_dmabuf;
8274 lp = (uint32_t *) pcmd->virt;
8275
8276 lp++;
8277 rn = (RNID *) lp;
8278
8279 /* RNID received */
8280
8281 switch (rn->Format) {
8282 case 0:
8283 case RNID_TOPOLOGY_DISC:
8284 /* Send back ACC */
8285 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
8286 break;
8287 default:
8288 /* Reject this request because format not supported */
8289 stat.un.b.lsRjtRsvd0 = 0;
8290 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8291 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8292 stat.un.b.vendorUnique = 0;
8293 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
8294 NULL);
8295 }
8296 return 0;
8297 }
8298
8299 /**
8300 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
8301 * @vport: pointer to a host virtual N_Port data structure.
8302 * @cmdiocb: pointer to lpfc command iocb data structure.
8303 * @ndlp: pointer to a node-list data structure.
8304 *
8305 * Return code
8306 * 0 - Successfully processed echo iocb (currently always return 0)
8307 **/
8308 static int
8309 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8310 struct lpfc_nodelist *ndlp)
8311 {
8312 uint8_t *pcmd;
8313
8314 pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
8315
8316 /* skip over first word of echo command to find echo data */
8317 pcmd += sizeof(uint32_t);
8318
8319 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
8320 return 0;
8321 }
8322
8323 /**
8324 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
8325 * @vport: pointer to a host virtual N_Port data structure.
8326 * @cmdiocb: pointer to lpfc command iocb data structure.
8327 * @ndlp: pointer to a node-list data structure.
8328 *
8329 * This routine processes a Link Incident Report Registration(LIRR) IOCB
8330 * received as an ELS unsolicited event. Currently, this function just invokes
8331 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
8332 *
8333 * Return code
8334 * 0 - Successfully processed lirr iocb (currently always return 0)
8335 **/
8336 static int
8337 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8338 struct lpfc_nodelist *ndlp)
8339 {
8340 struct ls_rjt stat;
8341
8342 /* For now, unconditionally reject this command */
8343 stat.un.b.lsRjtRsvd0 = 0;
8344 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8345 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8346 stat.un.b.vendorUnique = 0;
8347 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8348 return 0;
8349 }
8350
8351 /**
8352 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
8353 * @vport: pointer to a host virtual N_Port data structure.
8354 * @cmdiocb: pointer to lpfc command iocb data structure.
8355 * @ndlp: pointer to a node-list data structure.
8356 *
8357 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
8358 * received as an ELS unsolicited event. A request to RRQ shall only
8359 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
8360 * Nx_Port N_Port_ID of the target Exchange is the same as the
8361 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
8362 * not accepted, an LS_RJT with reason code "Unable to perform
8363 * command request" and reason code explanation "Invalid Originator
8364 * S_ID" shall be returned. For now, we just unconditionally accept
8365 * RRQ from the target.
8366 **/
8367 static void
8368 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8369 struct lpfc_nodelist *ndlp)
8370 {
8371 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8372 if (vport->phba->sli_rev == LPFC_SLI_REV4)
8373 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
8374 }
8375
8376 /**
8377 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
8378 * @phba: pointer to lpfc hba data structure.
8379 * @pmb: pointer to the driver internal queue element for mailbox command.
8380 *
8381 * This routine is the completion callback function for the MBX_READ_LNK_STAT
8382 * mailbox command. This callback function is to actually send the Accept
8383 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
8384 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
8385 * mailbox command, constructs the RLS response with the link statistics
8386 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
8387 * response to the RLS.
8388 *
8389 * Note that the ndlp reference count will be incremented by 1 for holding the
8390 * ndlp and the reference to ndlp will be stored into the ndlp field of
8391 * the IOCB for the completion callback function to the RLS Accept Response
8392 * ELS IOCB command.
8393 *
8394 **/
8395 static void
8396 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8397 {
8398 int rc = 0;
8399 MAILBOX_t *mb;
8400 IOCB_t *icmd;
8401 union lpfc_wqe128 *wqe;
8402 struct RLS_RSP *rls_rsp;
8403 uint8_t *pcmd;
8404 struct lpfc_iocbq *elsiocb;
8405 struct lpfc_nodelist *ndlp;
8406 uint16_t oxid;
8407 uint16_t rxid;
8408 uint32_t cmdsize;
8409 u32 ulp_context;
8410
8411 mb = &pmb->u.mb;
8412
8413 ndlp = pmb->ctx_ndlp;
8414 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
8415 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
8416 pmb->ctx_buf = NULL;
8417 pmb->ctx_ndlp = NULL;
8418
8419 if (mb->mbxStatus) {
8420 mempool_free(pmb, phba->mbox_mem_pool);
8421 return;
8422 }
8423
8424 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
8425 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8426 lpfc_max_els_tries, ndlp,
8427 ndlp->nlp_DID, ELS_CMD_ACC);
8428
8429 /* Decrement the ndlp reference count from previous mbox command */
8430 lpfc_nlp_put(ndlp);
8431
8432 if (!elsiocb) {
8433 mempool_free(pmb, phba->mbox_mem_pool);
8434 return;
8435 }
8436
8437 ulp_context = get_job_ulpcontext(phba, elsiocb);
8438 if (phba->sli_rev == LPFC_SLI_REV4) {
8439 wqe = &elsiocb->wqe;
8440 /* Xri / rx_id */
8441 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
8442 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
8443 } else {
8444 icmd = &elsiocb->iocb;
8445 icmd->ulpContext = rxid;
8446 icmd->unsli3.rcvsli3.ox_id = oxid;
8447 }
8448
8449 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8450 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8451 pcmd += sizeof(uint32_t); /* Skip past command */
8452 rls_rsp = (struct RLS_RSP *)pcmd;
8453
8454 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
8455 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
8456 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
8457 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
8458 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
8459 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
8460 mempool_free(pmb, phba->mbox_mem_pool);
8461 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
8462 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8463 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
8464 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
8465 elsiocb->iotag, ulp_context,
8466 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8467 ndlp->nlp_rpi);
8468 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8469 phba->fc_stat.elsXmitACC++;
8470 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8471 if (!elsiocb->ndlp) {
8472 lpfc_els_free_iocb(phba, elsiocb);
8473 return;
8474 }
8475
8476 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8477 if (rc == IOCB_ERROR) {
8478 lpfc_els_free_iocb(phba, elsiocb);
8479 lpfc_nlp_put(ndlp);
8480 }
8481 return;
8482 }
8483
8484 /**
8485 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
8486 * @vport: pointer to a host virtual N_Port data structure.
8487 * @cmdiocb: pointer to lpfc command iocb data structure.
8488 * @ndlp: pointer to a node-list data structure.
8489 *
8490 * This routine processes Read Link Status (RLS) IOCB received as an
8491 * ELS unsolicited event. It first checks the remote port state. If the
8492 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8493  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8494  * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
8495  * to read the HBA link statistics. The callback function,
8496  * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
8497  * actually sends out the RLS Accept (ACC) response.
8498 *
8499 * Return codes
8500 * 0 - Successfully processed rls iocb (currently always return 0)
8501 **/
8502 static int
8503 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8504 struct lpfc_nodelist *ndlp)
8505 {
8506 struct lpfc_hba *phba = vport->phba;
8507 LPFC_MBOXQ_t *mbox;
8508 struct ls_rjt stat;
8509 u32 ctx = get_job_ulpcontext(phba, cmdiocb);
8510 u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
8511
8512 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8513 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8514 /* reject the unsolicited RLS request and done with it */
8515 goto reject_out;
8516
8517 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
8518 if (mbox) {
8519 lpfc_read_lnk_stat(phba, mbox);
8520 mbox->ctx_buf = (void *)((unsigned long)
8521 (ox_id << 16 | ctx));
8522 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8523 if (!mbox->ctx_ndlp)
8524 goto node_err;
8525 mbox->vport = vport;
8526 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
8527 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8528 != MBX_NOT_FINISHED)
8529 /* Mbox completion will send ELS Response */
8530 return 0;
8531 /* Decrement reference count used for the failed mbox
8532 * command.
8533 */
8534 lpfc_nlp_put(ndlp);
8535 node_err:
8536 mempool_free(mbox, phba->mbox_mem_pool);
8537 }
8538 reject_out:
8539 /* issue rejection response */
8540 stat.un.b.lsRjtRsvd0 = 0;
8541 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8542 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8543 stat.un.b.vendorUnique = 0;
8544 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8545 return 0;
8546 }
8547
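/*
 * Illustrative sketch (not compiled): the RLS path above has no private
 * context structure for the mailbox, so lpfc_els_rcv_rls() packs the two
 * 16-bit exchange IDs into the pointer-sized ctx_buf field and
 * lpfc_els_rsp_rls_acc() unpacks them again. Helper names are hypothetical.
 */
#if 0
static unsigned long rls_pack_xchg_ids(u16 ox_id, u16 rx_id)
{
	/* ox_id in bits 31:16, rx_id (ulpContext) in bits 15:0 */
	return ((unsigned long)ox_id << 16) | rx_id;
}

static void rls_unpack_xchg_ids(unsigned long ctx, u16 *ox_id, u16 *rx_id)
{
	*rx_id = ctx & 0xffff;
	*ox_id = (ctx >> 16) & 0xffff;
}
#endif
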
8548 /**
8549 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
8550 * @vport: pointer to a host virtual N_Port data structure.
8551 * @cmdiocb: pointer to lpfc command iocb data structure.
8552 * @ndlp: pointer to a node-list data structure.
8553 *
8554  * This routine processes a Read Timeout Value (RTV) IOCB received as an
8555  * ELS unsolicited event. It first checks the remote port state. If the
8556  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
8557  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
8558  * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout
8559 * Value (RTV) unsolicited IOCB event.
8560 *
8561 * Note that the ndlp reference count will be incremented by 1 for holding the
8562 * ndlp and the reference to ndlp will be stored into the ndlp field of
8563 * the IOCB for the completion callback function to the RTV Accept Response
8564 * ELS IOCB command.
8565 *
8566 * Return codes
8567 * 0 - Successfully processed rtv iocb (currently always return 0)
8568 **/
8569 static int
8570 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8571 struct lpfc_nodelist *ndlp)
8572 {
8573 int rc = 0;
8574 IOCB_t *icmd;
8575 union lpfc_wqe128 *wqe;
8576 struct lpfc_hba *phba = vport->phba;
8577 struct ls_rjt stat;
8578 struct RTV_RSP *rtv_rsp;
8579 uint8_t *pcmd;
8580 struct lpfc_iocbq *elsiocb;
8581 uint32_t cmdsize;
8582 u32 ulp_context;
8583
8584 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8585 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
8586 /* reject the unsolicited RTV request and done with it */
8587 goto reject_out;
8588
8589 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
8590 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
8591 lpfc_max_els_tries, ndlp,
8592 ndlp->nlp_DID, ELS_CMD_ACC);
8593
8594 if (!elsiocb)
8595 return 1;
8596
8597 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8598 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8599 pcmd += sizeof(uint32_t); /* Skip past command */
8600
8601 ulp_context = get_job_ulpcontext(phba, elsiocb);
8602 /* use the command's xri in the response */
8603 if (phba->sli_rev == LPFC_SLI_REV4) {
8604 wqe = &elsiocb->wqe;
8605 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8606 get_job_ulpcontext(phba, cmdiocb));
8607 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8608 get_job_rcvoxid(phba, cmdiocb));
8609 } else {
8610 icmd = &elsiocb->iocb;
8611 icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
8612 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
8613 }
8614
8615 rtv_rsp = (struct RTV_RSP *)pcmd;
8616
8617 /* populate RTV payload */
8618 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
8619 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
8620 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
8621 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
8622 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
8623
8624 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
8625 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
8626 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
8627 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
8628 "Data: x%x x%x x%x\n",
8629 elsiocb->iotag, ulp_context,
8630 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8631 ndlp->nlp_rpi,
8632 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
8633 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8634 phba->fc_stat.elsXmitACC++;
8635 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8636 if (!elsiocb->ndlp) {
8637 lpfc_els_free_iocb(phba, elsiocb);
8638 return 0;
8639 }
8640
8641 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8642 if (rc == IOCB_ERROR) {
8643 lpfc_els_free_iocb(phba, elsiocb);
8644 lpfc_nlp_put(ndlp);
8645 }
8646 return 0;
8647
8648 reject_out:
8649 /* issue rejection response */
8650 stat.un.b.lsRjtRsvd0 = 0;
8651 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8652 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8653 stat.un.b.vendorUnique = 0;
8654 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
8655 return 0;
8656 }
8657
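/*
 * Illustrative sketch (not compiled): the RTV accept above reports R_A_TOV
 * in milliseconds while the driver keeps phba->fc_ratov in seconds, hence
 * the "* 1000" before the big-endian conversion. The helper name is
 * hypothetical.
 */
#if 0
static __be32 rtv_ratov_field(u32 fc_ratov_secs)
{
	return cpu_to_be32(fc_ratov_secs * 1000);	/* seconds -> msecs */
}
#endif
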
8658 /* lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote port
8659 * @vport: pointer to a host virtual N_Port data structure.
8660 * @ndlp: pointer to a node-list data structure.
8661 * @did: DID of the target.
8662 * @rrq: Pointer to the rrq struct.
8663 *
8664  * Build an ELS RRQ command and send it to the target. If the issue_iocb is
8665  * successful, the completion handler will clear the RRQ.
8666 *
8667 * Return codes
8668 * 0 - Successfully sent rrq els iocb.
8669 * 1 - Failed to send rrq els iocb.
8670 **/
8671 static int
8672 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8673 uint32_t did, struct lpfc_node_rrq *rrq)
8674 {
8675 struct lpfc_hba *phba = vport->phba;
8676 struct RRQ *els_rrq;
8677 struct lpfc_iocbq *elsiocb;
8678 uint8_t *pcmd;
8679 uint16_t cmdsize;
8680 int ret;
8681
8682 if (!ndlp)
8683 return 1;
8684
8685 /* If ndlp is not NULL, we will bump the reference count on it */
8686 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
8687 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
8688 ELS_CMD_RRQ);
8689 if (!elsiocb)
8690 return 1;
8691
8692 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
8693
8694 /* For RRQ request, remainder of payload is Exchange IDs */
8695 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
8696 pcmd += sizeof(uint32_t);
8697 els_rrq = (struct RRQ *) pcmd;
8698
8699 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
8700 bf_set(rrq_rxid, els_rrq, rrq->rxid);
8701 bf_set(rrq_did, els_rrq, vport->fc_myDID);
8702 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
8703 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
8704
8705
8706 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8707 "Issue RRQ: did:x%x",
8708 did, rrq->xritag, rrq->rxid);
8709 elsiocb->context_un.rrq = rrq;
8710 elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
8711
8712 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8713 if (!elsiocb->ndlp)
8714 goto io_err;
8715
8716 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8717 if (ret == IOCB_ERROR) {
8718 lpfc_nlp_put(ndlp);
8719 goto io_err;
8720 }
8721 return 0;
8722
8723 io_err:
8724 lpfc_els_free_iocb(phba, elsiocb);
8725 return 1;
8726 }
8727
8728 /**
8729 * lpfc_send_rrq - Sends ELS RRQ if needed.
8730 * @phba: pointer to lpfc hba data structure.
8731 * @rrq: pointer to the active rrq.
8732 *
8733 * This routine will call the lpfc_issue_els_rrq if the rrq is
8734 * still active for the xri. If this function returns a failure then
8735 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
8736 *
8737 * Returns 0 Success.
8738 * 1 Failure.
8739 **/
8740 int
8741 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
8742 {
8743 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
8744 rrq->nlp_DID);
8745 if (!ndlp)
8746 return 1;
8747
8748 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
8749 return lpfc_issue_els_rrq(rrq->vport, ndlp,
8750 rrq->nlp_DID, rrq);
8751 else
8752 return 1;
8753 }
8754
8755 /**
8756 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
8757 * @vport: pointer to a host virtual N_Port data structure.
8758 * @cmdsize: size of the ELS command.
8759 * @oldiocb: pointer to the original lpfc command iocb data structure.
8760 * @ndlp: pointer to a node-list data structure.
8761 *
8762  * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
8763 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
8764 *
8765 * Note that the ndlp reference count will be incremented by 1 for holding the
8766 * ndlp and the reference to ndlp will be stored into the ndlp field of
8767 * the IOCB for the completion callback function to the RPL Accept Response
8768 * ELS command.
8769 *
8770 * Return code
8771 * 0 - Successfully issued ACC RPL ELS command
8772 * 1 - Failed to issue ACC RPL ELS command
8773 **/
8774 static int
8775 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
8776 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
8777 {
8778 int rc = 0;
8779 struct lpfc_hba *phba = vport->phba;
8780 IOCB_t *icmd;
8781 union lpfc_wqe128 *wqe;
8782 RPL_RSP rpl_rsp;
8783 struct lpfc_iocbq *elsiocb;
8784 uint8_t *pcmd;
8785 u32 ulp_context;
8786
8787 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
8788 ndlp->nlp_DID, ELS_CMD_ACC);
8789
8790 if (!elsiocb)
8791 return 1;
8792
8793 ulp_context = get_job_ulpcontext(phba, elsiocb);
8794 if (phba->sli_rev == LPFC_SLI_REV4) {
8795 wqe = &elsiocb->wqe;
8796 /* Xri / rx_id */
8797 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
8798 get_job_ulpcontext(phba, oldiocb));
8799 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8800 get_job_rcvoxid(phba, oldiocb));
8801 } else {
8802 icmd = &elsiocb->iocb;
8803 icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
8804 icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
8805 }
8806
8807 pcmd = elsiocb->cmd_dmabuf->virt;
8808 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
8809 pcmd += sizeof(uint16_t);
8810 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
8811 pcmd += sizeof(uint16_t);
8812
8813 /* Setup the RPL ACC payload */
8814 rpl_rsp.listLen = be32_to_cpu(1);
8815 rpl_rsp.index = 0;
8816 rpl_rsp.port_num_blk.portNum = 0;
8817 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
8818 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
8819 sizeof(struct lpfc_name));
8820 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
8821 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
8822 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8823 "0120 Xmit ELS RPL ACC response tag x%x "
8824 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
8825 "rpi x%x\n",
8826 elsiocb->iotag, ulp_context,
8827 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
8828 ndlp->nlp_rpi);
8829 elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
8830 phba->fc_stat.elsXmitACC++;
8831 elsiocb->ndlp = lpfc_nlp_get(ndlp);
8832 if (!elsiocb->ndlp) {
8833 lpfc_els_free_iocb(phba, elsiocb);
8834 return 1;
8835 }
8836
8837 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
8838 if (rc == IOCB_ERROR) {
8839 lpfc_els_free_iocb(phba, elsiocb);
8840 lpfc_nlp_put(ndlp);
8841 return 1;
8842 }
8843
8844 return 0;
8845 }
8846
8847 /**
8848 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
8849 * @vport: pointer to a host virtual N_Port data structure.
8850 * @cmdiocb: pointer to lpfc command iocb data structure.
8851 * @ndlp: pointer to a node-list data structure.
8852 *
8853 * This routine processes Read Port List (RPL) IOCB received as an ELS
8854 * unsolicited event. It first checks the remote port state. If the remote
8855  * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it
8856 * invokes the lpfc_els_rsp_reject() routine to send reject response.
8857 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
8858 * to accept the RPL.
8859 *
8860 * Return code
8861 * 0 - Successfully processed rpl iocb (currently always return 0)
8862 **/
8863 static int
8864 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8865 struct lpfc_nodelist *ndlp)
8866 {
8867 struct lpfc_dmabuf *pcmd;
8868 uint32_t *lp;
8869 uint32_t maxsize;
8870 uint16_t cmdsize;
8871 RPL *rpl;
8872 struct ls_rjt stat;
8873
8874 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8875 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
8876 /* issue rejection response */
8877 stat.un.b.lsRjtRsvd0 = 0;
8878 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8879 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8880 stat.un.b.vendorUnique = 0;
8881 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
8882 NULL);
8883 /* rejected the unsolicited RPL request and done with it */
8884 return 0;
8885 }
8886
8887 pcmd = cmdiocb->cmd_dmabuf;
8888 lp = (uint32_t *) pcmd->virt;
8889 rpl = (RPL *) (lp + 1);
8890 maxsize = be32_to_cpu(rpl->maxsize);
8891
8892 /* We support only one port */
8893 if ((rpl->index == 0) &&
8894 ((maxsize == 0) ||
8895 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
8896 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
8897 } else {
8898 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
8899 }
8900 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
8901
8902 return 0;
8903 }
8904
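/*
 * Illustrative sketch (not compiled): the sizing rule used by
 * lpfc_els_rcv_rpl() above. Since only one port is reported, a full
 * single-entry RPL_RSP is returned whenever the requester can take it
 * (index 0 and a maxsize of 0 or at least sizeof(RPL_RSP) worth of words);
 * otherwise the accept is clamped to the requested maxsize. The helper
 * name is hypothetical.
 */
#if 0
static u16 rpl_acc_cmdsize(u32 index, u32 maxsize_words)
{
	if (index == 0 &&
	    (maxsize_words == 0 ||
	     maxsize_words * sizeof(u32) >= sizeof(RPL_RSP)))
		return sizeof(u32) + sizeof(RPL_RSP);
	return sizeof(u32) + maxsize_words * sizeof(u32);
}
#endif
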
8905 /**
8906 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
8907 * @vport: pointer to a virtual N_Port data structure.
8908 * @cmdiocb: pointer to lpfc command iocb data structure.
8909 * @ndlp: pointer to a node-list data structure.
8910 *
8911 * This routine processes Fibre Channel Address Resolution Protocol
8912 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
8913 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
8914 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
8915 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
8916 * remote PortName is compared against the FC PortName stored in the @vport
8917 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
8918 * compared against the FC NodeName stored in the @vport data structure.
8919 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
8920 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
8921 * invoked to send out FARP Response to the remote node. Before sending the
8922  * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
8923 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
8924 * routine is invoked to log into the remote port first.
8925 *
8926 * Return code
8927 * 0 - Either the FARP Match Mode not supported or successfully processed
8928 **/
8929 static int
8930 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8931 struct lpfc_nodelist *ndlp)
8932 {
8933 struct lpfc_dmabuf *pcmd;
8934 uint32_t *lp;
8935 FARP *fp;
8936 uint32_t cnt, did;
8937
8938 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
8939 pcmd = cmdiocb->cmd_dmabuf;
8940 lp = (uint32_t *) pcmd->virt;
8941
8942 lp++;
8943 fp = (FARP *) lp;
8944 /* FARP-REQ received from DID <did> */
8945 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8946 "0601 FARP-REQ received from DID x%x\n", did);
8947 /* We will only support match on WWPN or WWNN */
8948 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
8949 return 0;
8950 }
8951
8952 cnt = 0;
8953 /* If this FARP command is searching for my portname */
8954 if (fp->Mflags & FARP_MATCH_PORT) {
8955 if (memcmp(&fp->RportName, &vport->fc_portname,
8956 sizeof(struct lpfc_name)) == 0)
8957 cnt = 1;
8958 }
8959
8960 /* If this FARP command is searching for my nodename */
8961 if (fp->Mflags & FARP_MATCH_NODE) {
8962 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
8963 sizeof(struct lpfc_name)) == 0)
8964 cnt = 1;
8965 }
8966
8967 if (cnt) {
8968 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
8969 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
8970 /* Log back into the node before sending the FARP. */
8971 if (fp->Rflags & FARP_REQUEST_PLOGI) {
8972 ndlp->nlp_prev_state = ndlp->nlp_state;
8973 lpfc_nlp_set_state(vport, ndlp,
8974 NLP_STE_PLOGI_ISSUE);
8975 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8976 }
8977
8978 /* Send a FARP response to that node */
8979 if (fp->Rflags & FARP_REQUEST_FARPR)
8980 lpfc_issue_els_farpr(vport, did, 0);
8981 }
8982 }
8983 return 0;
8984 }
8985
8986 /**
8987 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
8988 * @vport: pointer to a host virtual N_Port data structure.
8989 * @cmdiocb: pointer to lpfc command iocb data structure.
8990 * @ndlp: pointer to a node-list data structure.
8991 *
8992 * This routine processes Fibre Channel Address Resolution Protocol
8993 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
8994 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
8995 * the FARP response request.
8996 *
8997 * Return code
8998 * 0 - Successfully processed FARPR IOCB (currently always return 0)
8999 **/
9000 static int
9001 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9002 struct lpfc_nodelist *ndlp)
9003 {
9004 struct lpfc_dmabuf *pcmd;
9005 uint32_t *lp;
9006 uint32_t did;
9007
9008 did = get_job_els_rsp64_did(vport->phba, cmdiocb);
9009 pcmd = cmdiocb->cmd_dmabuf;
9010 lp = (uint32_t *)pcmd->virt;
9011
9012 lp++;
9013 /* FARP-RSP received from DID <did> */
9014 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9015 "0600 FARP-RSP received from DID x%x\n", did);
9016 /* ACCEPT the Farp resp request */
9017 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
9018
9019 return 0;
9020 }
9021
9022 /**
9023 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
9024 * @vport: pointer to a host virtual N_Port data structure.
9025 * @cmdiocb: pointer to lpfc command iocb data structure.
9026 * @fan_ndlp: pointer to a node-list data structure.
9027 *
9028 * This routine processes a Fabric Address Notification (FAN) IOCB
9029 * command received as an ELS unsolicited event. The FAN ELS command will
9030 * only be processed on a physical port (i.e., the @vport represents the
9031 * physical port). The fabric NodeName and PortName from the FAN IOCB are
9032 * compared against those in the phba data structure. If any of those is
9033 * different, the lpfc_initial_flogi() routine is invoked to initialize
9034 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
9035 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
9036 * is invoked to register login to the fabric.
9037 *
9038 * Return code
9039 * 0 - Successfully processed fan iocb (currently always return 0).
9040 **/
9041 static int
9042 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9043 struct lpfc_nodelist *fan_ndlp)
9044 {
9045 struct lpfc_hba *phba = vport->phba;
9046 uint32_t *lp;
9047 FAN *fp;
9048
9049 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
9050 lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
9051 fp = (FAN *) ++lp;
9052 /* FAN received; Fan does not have a reply sequence */
9053 if ((vport == phba->pport) &&
9054 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
9055 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
9056 sizeof(struct lpfc_name))) ||
9057 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
9058 sizeof(struct lpfc_name)))) {
9059 /* This port has switched fabrics. FLOGI is required */
9060 lpfc_issue_init_vfi(vport);
9061 } else {
9062 /* FAN verified - skip FLOGI */
9063 vport->fc_myDID = vport->fc_prevDID;
9064 if (phba->sli_rev < LPFC_SLI_REV4)
9065 lpfc_issue_fabric_reglogin(vport);
9066 else {
9067 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9068 "3138 Need register VFI: (x%x/%x)\n",
9069 vport->fc_prevDID, vport->fc_myDID);
9070 lpfc_issue_reg_vfi(vport);
9071 }
9072 }
9073 }
9074 return 0;
9075 }
9076
9077 /**
9078 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
9079 * @vport: pointer to a host virtual N_Port data structure.
9080 * @cmdiocb: pointer to lpfc command iocb data structure.
9081 * @ndlp: pointer to a node-list data structure.
9082 *
9083 * Return code
9084  * 0 - Successfully processed EDC iocb (currently always return 0)
9085 **/
9086 static int
9087 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
9088 struct lpfc_nodelist *ndlp)
9089 {
9090 struct lpfc_hba *phba = vport->phba;
9091 struct fc_els_edc *edc_req;
9092 struct fc_tlv_desc *tlv;
9093 uint8_t *payload;
9094 uint32_t *ptr, dtag;
9095 const char *dtag_nm;
9096 int desc_cnt = 0, bytes_remain;
9097 bool rcv_cap_desc = false;
9098
9099 payload = cmdiocb->cmd_dmabuf->virt;
9100
9101 edc_req = (struct fc_els_edc *)payload;
9102 bytes_remain = be32_to_cpu(edc_req->desc_len);
9103
9104 ptr = (uint32_t *)payload;
9105 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
9106 "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
9107 bytes_remain, be32_to_cpu(*ptr),
9108 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
9109
9110 /* No signal support unless there is a congestion descriptor */
9111 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9112 phba->cgn_sig_freq = 0;
9113 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
9114
9115 if (bytes_remain <= 0)
9116 goto out;
9117
9118 tlv = edc_req->desc;
9119
9120 /*
9121 * cycle through EDC diagnostic descriptors to find the
9122 * congestion signaling capability descriptor
9123 */
9124 while (bytes_remain && !rcv_cap_desc) {
9125 if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
9126 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
9127 "6464 Truncated TLV hdr on "
9128 "Diagnostic descriptor[%d]\n",
9129 desc_cnt);
9130 goto out;
9131 }
9132
9133 dtag = be32_to_cpu(tlv->desc_tag);
9134 switch (dtag) {
9135 case ELS_DTAG_LNK_FAULT_CAP:
9136 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9137 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9138 sizeof(struct fc_diag_lnkflt_desc)) {
9139 lpfc_printf_log(
9140 phba, KERN_WARNING, LOG_CGN_MGMT,
9141 "6465 Truncated Link Fault Diagnostic "
9142 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9143 desc_cnt, bytes_remain,
9144 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9145 sizeof(struct fc_diag_cg_sig_desc));
9146 goto out;
9147 }
9148 /* No action for Link Fault descriptor for now */
9149 break;
9150 case ELS_DTAG_CG_SIGNAL_CAP:
9151 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
9152 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
9153 sizeof(struct fc_diag_cg_sig_desc)) {
9154 lpfc_printf_log(
9155 phba, KERN_WARNING, LOG_CGN_MGMT,
9156 "6466 Truncated cgn signal Diagnostic "
9157 "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
9158 desc_cnt, bytes_remain,
9159 FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
9160 sizeof(struct fc_diag_cg_sig_desc));
9161 goto out;
9162 }
9163
9164 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
9165 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
9166
9167 /* We start negotiation with lpfc_fabric_cgn_frequency.
9168 * When we process the EDC, we will settle on the
9169 * higher frequency.
9170 */
9171 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9172
9173 lpfc_least_capable_settings(
9174 phba, (struct fc_diag_cg_sig_desc *)tlv);
9175 rcv_cap_desc = true;
9176 break;
9177 default:
9178 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
9179 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
9180 "6467 unknown Diagnostic "
9181 "Descriptor[%d]: tag x%x (%s)\n",
9182 desc_cnt, dtag, dtag_nm);
9183 }
9184 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
9185 tlv = fc_tlv_next_desc(tlv);
9186 desc_cnt++;
9187 }
9188 out:
9189 /* Need to send back an ACC */
9190 lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
9191
9192 lpfc_config_cgn_signal(phba);
9193 return 0;
9194 }
9195
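/*
 * Illustrative sketch (not compiled): the generic TLV walk used by the EDC
 * handler above. The descriptor tag and length are big-endian;
 * FC_TLV_DESC_SZ_FROM_LENGTH() yields the full descriptor size (header plus
 * payload) and fc_tlv_next_desc() advances by that amount. Dispatch on the
 * tag is elided; the helper name is hypothetical.
 */
#if 0
static void edc_walk_descriptors(struct fc_tlv_desc *tlv, int bytes_remain)
{
	u32 dtag;
	size_t dsz;

	while (bytes_remain >= FC_TLV_DESC_HDR_SZ) {
		dtag = be32_to_cpu(tlv->desc_tag);
		dsz = FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		if (dsz > bytes_remain)
			break;		/* truncated descriptor */
		/* ... switch (dtag) and handle the descriptor here ... */
		bytes_remain -= dsz;
		tlv = fc_tlv_next_desc(tlv);
	}
}
#endif
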
9196 /**
9197  * lpfc_els_timeout - Handler function for the ELS timer
9198 * @t: timer context used to obtain the vport.
9199 *
9200 * This routine is invoked by the ELS timer after timeout. It posts the ELS
9201 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
9202 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
9203 * up the worker thread. It is for the worker thread to invoke the routine
9204 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
9205 **/
9206 void
9207 lpfc_els_timeout(struct timer_list *t)
9208 {
9209 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
9210 struct lpfc_hba *phba = vport->phba;
9211 uint32_t tmo_posted;
9212 unsigned long iflag;
9213
9214 spin_lock_irqsave(&vport->work_port_lock, iflag);
9215 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
9216 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9217 vport->work_port_events |= WORKER_ELS_TMO;
9218 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
9219
9220 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
9221 lpfc_worker_wake_up(phba);
9222 return;
9223 }
9224
9225
9226 /**
9227 * lpfc_els_timeout_handler - Process an els timeout event
9228 * @vport: pointer to a virtual N_Port data structure.
9229 *
9230 * This routine is the actual handler function that processes an ELS timeout
9231 * event. It walks the ELS ring to get and abort all the IOCBs (except the
9232 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
9233 * invoking the lpfc_sli_issue_abort_iotag() routine.
9234 **/
9235 void
9236 lpfc_els_timeout_handler(struct lpfc_vport *vport)
9237 {
9238 struct lpfc_hba *phba = vport->phba;
9239 struct lpfc_sli_ring *pring;
9240 struct lpfc_iocbq *tmp_iocb, *piocb;
9241 IOCB_t *cmd = NULL;
9242 struct lpfc_dmabuf *pcmd;
9243 uint32_t els_command = 0;
9244 uint32_t timeout;
9245 uint32_t remote_ID = 0xffffffff;
9246 LIST_HEAD(abort_list);
9247 u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
9248
9249
9250 timeout = (uint32_t)(phba->fc_ratov << 1);
9251
9252 pring = lpfc_phba_elsring(phba);
9253 if (unlikely(!pring))
9254 return;
9255
9256 if (phba->pport->load_flag & FC_UNLOADING)
9257 return;
9258
9259 spin_lock_irq(&phba->hbalock);
9260 if (phba->sli_rev == LPFC_SLI_REV4)
9261 spin_lock(&pring->ring_lock);
9262
9263 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9264 ulp_command = get_job_cmnd(phba, piocb);
9265 ulp_context = get_job_ulpcontext(phba, piocb);
9266 did = get_job_els_rsp64_did(phba, piocb);
9267
9268 if (phba->sli_rev == LPFC_SLI_REV4) {
9269 iotag = get_wqe_reqtag(piocb);
9270 } else {
9271 cmd = &piocb->iocb;
9272 iotag = cmd->ulpIoTag;
9273 }
9274
9275 if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
9276 ulp_command == CMD_ABORT_XRI_CX ||
9277 ulp_command == CMD_ABORT_XRI_CN ||
9278 ulp_command == CMD_CLOSE_XRI_CN)
9279 continue;
9280
9281 if (piocb->vport != vport)
9282 continue;
9283
9284 pcmd = piocb->cmd_dmabuf;
9285 if (pcmd)
9286 els_command = *(uint32_t *) (pcmd->virt);
9287
9288 if (els_command == ELS_CMD_FARP ||
9289 els_command == ELS_CMD_FARPR ||
9290 els_command == ELS_CMD_FDISC)
9291 continue;
9292
9293 if (piocb->drvrTimeout > 0) {
9294 if (piocb->drvrTimeout >= timeout)
9295 piocb->drvrTimeout -= timeout;
9296 else
9297 piocb->drvrTimeout = 0;
9298 continue;
9299 }
9300
9301 remote_ID = 0xffffffff;
9302 if (ulp_command != CMD_GEN_REQUEST64_CR) {
9303 remote_ID = did;
9304 } else {
9305 struct lpfc_nodelist *ndlp;
9306 ndlp = __lpfc_findnode_rpi(vport, ulp_context);
9307 if (ndlp)
9308 remote_ID = ndlp->nlp_DID;
9309 }
9310 list_add_tail(&piocb->dlist, &abort_list);
9311 }
9312 if (phba->sli_rev == LPFC_SLI_REV4)
9313 spin_unlock(&pring->ring_lock);
9314 spin_unlock_irq(&phba->hbalock);
9315
9316 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9317 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9318 "0127 ELS timeout Data: x%x x%x x%x "
9319 "x%x\n", els_command,
9320 remote_ID, ulp_command, iotag);
9321
9322 spin_lock_irq(&phba->hbalock);
9323 list_del_init(&piocb->dlist);
9324 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9325 spin_unlock_irq(&phba->hbalock);
9326 }
9327
9328 /* Make sure HBA is alive */
9329 lpfc_issue_hb_tmo(phba);
9330
9331 if (!list_empty(&pring->txcmplq))
9332 if (!(phba->pport->load_flag & FC_UNLOADING))
9333 mod_timer(&vport->els_tmofunc,
9334 jiffies + msecs_to_jiffies(1000 * timeout));
9335 }
9336
9337 /**
9338 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
9339 * @vport: pointer to a host virtual N_Port data structure.
9340 *
9341 * This routine is used to clean up all the outstanding ELS commands on a
9342 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
9343 * routine. After that, it walks the ELS transmit queue to remove all the
9344 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
9345 * the IOCBs with a non-NULL completion callback function, the callback
9346 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9347 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
9348 * callback function, the IOCB will simply be released. Finally, it walks
9349 * the ELS transmit completion queue to issue an abort IOCB to any transmit
9350 * completion queue IOCB that is associated with the @vport and is not
9351 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
9352 * part of the discovery state machine) out to HBA by invoking the
9353  * lpfc_sli_issue_abort_iotag() routine. Note that while this function issues
9354  * the abort IOCB to any transmit completion queued IOCB, it does not guarantee
9355  * that the IOCBs are aborted when this function returns.
9356 **/
9357 void
9358 lpfc_els_flush_cmd(struct lpfc_vport *vport)
9359 {
9360 LIST_HEAD(abort_list);
9361 struct lpfc_hba *phba = vport->phba;
9362 struct lpfc_sli_ring *pring;
9363 struct lpfc_iocbq *tmp_iocb, *piocb;
9364 u32 ulp_command;
9365 unsigned long iflags = 0;
9366
9367 lpfc_fabric_abort_vport(vport);
9368
9369 /*
9370 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
9371 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
9372 * ultimately grabs the ring_lock, the driver must splice the list into
9373 * a working list and release the locks before calling the abort.
9374 */
9375 spin_lock_irqsave(&phba->hbalock, iflags);
9376 pring = lpfc_phba_elsring(phba);
9377
9378 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
9379 if (unlikely(!pring)) {
9380 spin_unlock_irqrestore(&phba->hbalock, iflags);
9381 return;
9382 }
9383
9384 if (phba->sli_rev == LPFC_SLI_REV4)
9385 spin_lock(&pring->ring_lock);
9386
9387 /* First we need to issue aborts to outstanding cmds on txcmpl */
9388 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
9389 if (piocb->cmd_flag & LPFC_IO_LIBDFC)
9390 continue;
9391
9392 if (piocb->vport != vport)
9393 continue;
9394
9395 if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
9396 continue;
9397
9398 /* On the ELS ring we can have ELS_REQUESTs or
9399 * GEN_REQUESTs waiting for a response.
9400 */
9401 ulp_command = get_job_cmnd(phba, piocb);
9402 if (ulp_command == CMD_ELS_REQUEST64_CR) {
9403 list_add_tail(&piocb->dlist, &abort_list);
9404
9405 /* If the link is down when flushing ELS commands
9406 * the firmware will not complete them till after
9407 * the link comes back up. This may confuse
9408 * discovery for the new link up, so we need to
9409 * change the compl routine to just clean up the iocb
9410 * and avoid any retry logic.
9411 */
9412 if (phba->link_state == LPFC_LINK_DOWN)
9413 piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
9414 }
9415 if (ulp_command == CMD_GEN_REQUEST64_CR)
9416 list_add_tail(&piocb->dlist, &abort_list);
9417 }
9418
9419 if (phba->sli_rev == LPFC_SLI_REV4)
9420 spin_unlock(&pring->ring_lock);
9421 spin_unlock_irqrestore(&phba->hbalock, iflags);
9422
9423 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
9424 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
9425 spin_lock_irqsave(&phba->hbalock, iflags);
9426 list_del_init(&piocb->dlist);
9427 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
9428 spin_unlock_irqrestore(&phba->hbalock, iflags);
9429 }
9430 /* Make sure HBA is alive */
9431 lpfc_issue_hb_tmo(phba);
9432
9433 if (!list_empty(&abort_list))
9434 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9435 "3387 abort list for txq not empty\n");
9436 INIT_LIST_HEAD(&abort_list);
9437
9438 spin_lock_irqsave(&phba->hbalock, iflags);
9439 if (phba->sli_rev == LPFC_SLI_REV4)
9440 spin_lock(&pring->ring_lock);
9441
9442 /* No need to abort the txq list,
9443 * just queue them up for lpfc_sli_cancel_iocbs
9444 */
9445 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
9446 ulp_command = get_job_cmnd(phba, piocb);
9447
9448 if (piocb->cmd_flag & LPFC_IO_LIBDFC)
9449 continue;
9450
9451 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
9452 if (ulp_command == CMD_QUE_RING_BUF_CN ||
9453 ulp_command == CMD_QUE_RING_BUF64_CN ||
9454 ulp_command == CMD_CLOSE_XRI_CN ||
9455 ulp_command == CMD_ABORT_XRI_CN ||
9456 ulp_command == CMD_ABORT_XRI_CX)
9457 continue;
9458
9459 if (piocb->vport != vport)
9460 continue;
9461
9462 list_del_init(&piocb->list);
9463 list_add_tail(&piocb->list, &abort_list);
9464 }
9465
9466 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
9467 if (vport == phba->pport) {
9468 list_for_each_entry_safe(piocb, tmp_iocb,
9469 &phba->fabric_iocb_list, list) {
9470 list_del_init(&piocb->list);
9471 list_add_tail(&piocb->list, &abort_list);
9472 }
9473 }
9474
9475 if (phba->sli_rev == LPFC_SLI_REV4)
9476 spin_unlock(&pring->ring_lock);
9477 spin_unlock_irqrestore(&phba->hbalock, iflags);
9478
9479 /* Cancel all the IOCBs from the completions list */
9480 lpfc_sli_cancel_iocbs(phba, &abort_list,
9481 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
9482
9483 return;
9484 }
9485
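/*
 * Illustrative sketch (not compiled): the two-phase pattern used by
 * lpfc_els_flush_cmd() above. Matching iocbs are first collected onto a
 * private list while the hba lock is held, then the lock is dropped (and
 * briefly retaken per entry) before lpfc_sli_issue_abort_iotag() is called,
 * since the abort path takes the ring_lock itself. The SLI4 ring_lock
 * handling is omitted for brevity and the function name is hypothetical.
 */
#if 0
static void els_abort_vport_txcmplq(struct lpfc_hba *phba,
				    struct lpfc_vport *vport,
				    struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *piocb, *tmp_iocb;
	unsigned long iflags;
	LIST_HEAD(abort_list);

	/* Phase 1: collect the matching iocbs under the lock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list)
		if (piocb->vport == vport)
			list_add_tail(&piocb->dlist, &abort_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Phase 2: abort each entry, retaking the lock per iocb */
	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_del_init(&piocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
#endif
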
9486 /**
9487 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
9488 * @phba: pointer to lpfc hba data structure.
9489 *
9490 * This routine is used to clean up all the outstanding ELS commands on a
9491 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
9492 * routine. After that, it walks the ELS transmit queue to remove all the
9493 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
9494 * the IOCBs with the completion callback function associated, the callback
9495 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
9496 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
9497 * callback function associated, the IOCB will simply be released. Finally,
9498 * it walks the ELS transmit completion queue to issue an abort IOCB to any
9499 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
9500 * management plane IOCBs that are not part of the discovery state machine)
9501 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
9502 **/
9503 void
9504 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
9505 {
9506 struct lpfc_vport *vport;
9507
9508 spin_lock_irq(&phba->port_list_lock);
9509 list_for_each_entry(vport, &phba->port_list, listentry)
9510 lpfc_els_flush_cmd(vport);
9511 spin_unlock_irq(&phba->port_list_lock);
9512
9513 return;
9514 }
9515
9516 /**
9517 * lpfc_send_els_failure_event - Posts an ELS command failure event
9518 * @phba: Pointer to hba context object.
9519 * @cmdiocbp: Pointer to command iocb which reported error.
9520 * @rspiocbp: Pointer to response iocb which reported error.
9521 *
9522 * This function sends an event when there is an ELS command
9523 * failure.
9524 **/
9525 void
9526 lpfc_send_els_failure_event(struct lpfc_hba *phba,
9527 struct lpfc_iocbq *cmdiocbp,
9528 struct lpfc_iocbq *rspiocbp)
9529 {
9530 struct lpfc_vport *vport = cmdiocbp->vport;
9531 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9532 struct lpfc_lsrjt_event lsrjt_event;
9533 struct lpfc_fabric_event_header fabric_event;
9534 struct ls_rjt stat;
9535 struct lpfc_nodelist *ndlp;
9536 uint32_t *pcmd;
9537 u32 ulp_status, ulp_word4;
9538
9539 ndlp = cmdiocbp->ndlp;
9540 if (!ndlp)
9541 return;
9542
9543 ulp_status = get_job_ulpstatus(phba, rspiocbp);
9544 ulp_word4 = get_job_word4(phba, rspiocbp);
9545
9546 if (ulp_status == IOSTAT_LS_RJT) {
9547 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
9548 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
9549 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
9550 sizeof(struct lpfc_name));
9551 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
9552 sizeof(struct lpfc_name));
9553 pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
9554 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
9555 stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
9556 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
9557 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
9558 fc_host_post_vendor_event(shost,
9559 fc_get_event_number(),
9560 sizeof(lsrjt_event),
9561 (char *)&lsrjt_event,
9562 LPFC_NL_VENDOR_ID);
9563 return;
9564 }
9565 if (ulp_status == IOSTAT_NPORT_BSY ||
9566 ulp_status == IOSTAT_FABRIC_BSY) {
9567 fabric_event.event_type = FC_REG_FABRIC_EVENT;
9568 if (ulp_status == IOSTAT_NPORT_BSY)
9569 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
9570 else
9571 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
9572 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
9573 sizeof(struct lpfc_name));
9574 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
9575 sizeof(struct lpfc_name));
9576 fc_host_post_vendor_event(shost,
9577 fc_get_event_number(),
9578 sizeof(fabric_event),
9579 (char *)&fabric_event,
9580 LPFC_NL_VENDOR_ID);
9581 return;
9582 }
9583
9584 }
9585
9586 /**
9587 * lpfc_send_els_event - Posts unsolicited els event
9588 * @vport: Pointer to vport object.
9589 * @ndlp: Pointer FC node object.
9590 * @payload: ELS command code type.
9591 *
9592 * This function posts an event when there is an incoming
9593 * unsolicited ELS command.
9594 **/
9595 static void
9596 lpfc_send_els_event(struct lpfc_vport *vport,
9597 struct lpfc_nodelist *ndlp,
9598 uint32_t *payload)
9599 {
9600 struct lpfc_els_event_header *els_data = NULL;
9601 struct lpfc_logo_event *logo_data = NULL;
9602 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9603
9604 if (*payload == ELS_CMD_LOGO) {
9605 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
9606 if (!logo_data) {
9607 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9608 "0148 Failed to allocate memory "
9609 "for LOGO event\n");
9610 return;
9611 }
9612 els_data = &logo_data->header;
9613 } else {
9614 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
9615 GFP_KERNEL);
9616 if (!els_data) {
9617 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9618 "0149 Failed to allocate memory "
9619 "for ELS event\n");
9620 return;
9621 }
9622 }
9623 els_data->event_type = FC_REG_ELS_EVENT;
9624 switch (*payload) {
9625 case ELS_CMD_PLOGI:
9626 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
9627 break;
9628 case ELS_CMD_PRLO:
9629 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
9630 break;
9631 case ELS_CMD_ADISC:
9632 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
9633 break;
9634 case ELS_CMD_LOGO:
9635 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
9636 /* Copy the WWPN in the LOGO payload */
9637 memcpy(logo_data->logo_wwpn, &payload[2],
9638 sizeof(struct lpfc_name));
9639 break;
9640 default:
9641 kfree(els_data);
9642 return;
9643 }
9644 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
9645 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
9646 if (*payload == ELS_CMD_LOGO) {
9647 fc_host_post_vendor_event(shost,
9648 fc_get_event_number(),
9649 sizeof(struct lpfc_logo_event),
9650 (char *)logo_data,
9651 LPFC_NL_VENDOR_ID);
9652 kfree(logo_data);
9653 } else {
9654 fc_host_post_vendor_event(shost,
9655 fc_get_event_number(),
9656 sizeof(struct lpfc_els_event_header),
9657 (char *)els_data,
9658 LPFC_NL_VENDOR_ID);
9659 kfree(els_data);
9660 }
9661
9662 return;
9663 }
9664
9665
9666 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
9667 FC_FPIN_LI_EVT_TYPES_INIT);
9668
9669 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
9670 FC_FPIN_DELI_EVT_TYPES_INIT);
9671
9672 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
9673 FC_FPIN_CONGN_EVT_TYPES_INIT);
9674
9675 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
9676 fc_fpin_congn_severity_types,
9677 FC_FPIN_CONGN_SEVERITY_INIT);
9678
9679
9680 /**
9681 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
9682 * @phba: Pointer to phba object.
9683 * @wwnlist: Pointer to list of WWPNs in FPIN payload
9684 * @cnt: count of WWPNs in FPIN payload
9685 *
9686  * This routine is called for LI and PC descriptors.
9687  * Output is limited to 6 log messages, with 6 WWPNs per log message.
9688 */
9689 static void
9690 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
9691 {
9692 char buf[LPFC_FPIN_WWPN_LINE_SZ];
9693 __be64 wwn;
9694 u64 wwpn;
9695 int i, len;
9696 int line = 0;
9697 int wcnt = 0;
9698 bool endit = false;
9699
9700 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
9701 for (i = 0; i < cnt; i++) {
9702 /* Are we on the last WWPN */
9703 if (i == (cnt - 1))
9704 endit = true;
9705
9706 /* Extract the next WWPN from the payload */
9707 wwn = *wwnlist++;
9708 wwpn = be64_to_cpu(wwn);
9709 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
9710 " %016llx", wwpn);
9711
9712 /* Log a message if we are on the last WWPN
9713 * or if we hit the max allowed per message.
9714 */
9715 wcnt++;
9716 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
9717 buf[len] = 0;
9718 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9719 "4686 %s\n", buf);
9720
9721 /* Check if we reached the last WWPN */
9722 if (endit)
9723 return;
9724
9725 /* Limit the number of log message displayed per FPIN */
9726 line++;
9727 if (line == LPFC_FPIN_WWPN_NUM_LINE) {
9728 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9729 "4687 %d WWPNs Truncated\n",
9730 cnt - i - 1);
9731 return;
9732 }
9733
9734 /* Start over with next log message */
9735 wcnt = 0;
9736 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
9737 "Additional WWPNs:");
9738 }
9739 }
9740 }
9741
9742 /**
9743 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
9744 * @phba: Pointer to phba object.
9745 * @tlv: Pointer to the Link Integrity Notification Descriptor.
9746 *
9747 * This function processes a Link Integrity FPIN event by logging a message.
9748 **/
9749 static void
9750 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
9751 {
9752 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
9753 const char *li_evt_str;
9754 u32 li_evt, cnt;
9755
9756 li_evt = be16_to_cpu(li->event_type);
9757 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
9758 cnt = be32_to_cpu(li->pname_count);
9759
9760 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9761 "4680 FPIN Link Integrity %s (x%x) "
9762 "Detecting PN x%016llx Attached PN x%016llx "
9763 "Duration %d mSecs Count %d Port Cnt %d\n",
9764 li_evt_str, li_evt,
9765 be64_to_cpu(li->detecting_wwpn),
9766 be64_to_cpu(li->attached_wwpn),
9767 be32_to_cpu(li->event_threshold),
9768 be32_to_cpu(li->event_count), cnt);
9769
9770 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
9771 }
9772
9773 /**
9774 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
9775 * @phba: Pointer to hba object.
9776 * @tlv: Pointer to the Delivery Notification Descriptor TLV
9777 *
9778 * This function processes a Delivery FPIN event by logging a message.
9779 **/
9780 static void
9781 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
9782 {
9783 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
9784 const char *del_rsn_str;
9785 u32 del_rsn;
9786 __be32 *frame;
9787
9788 del_rsn = be16_to_cpu(del->deli_reason_code);
9789 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
9790
9791 /* Skip over desc_tag/desc_len header to payload */
9792 frame = (__be32 *)(del + 1);
9793
9794 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9795 "4681 FPIN Delivery %s (x%x) "
9796 "Detecting PN x%016llx Attached PN x%016llx "
9797 "DiscHdr0 x%08x "
9798 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
9799 "DiscHdr4 x%08x DiscHdr5 x%08x\n",
9800 del_rsn_str, del_rsn,
9801 be64_to_cpu(del->detecting_wwpn),
9802 be64_to_cpu(del->attached_wwpn),
9803 be32_to_cpu(frame[0]),
9804 be32_to_cpu(frame[1]),
9805 be32_to_cpu(frame[2]),
9806 be32_to_cpu(frame[3]),
9807 be32_to_cpu(frame[4]),
9808 be32_to_cpu(frame[5]));
9809 }
9810
9811 /**
9812 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event.
9813 * @phba: Pointer to hba object.
9814 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV
9815 *
9816 * This function processes a Peer Congestion FPIN event by logging a message.
9817 **/
9818 static void
9819 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
9820 {
9821 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
9822 const char *pc_evt_str;
9823 u32 pc_evt, cnt;
9824
9825 pc_evt = be16_to_cpu(pc->event_type);
9826 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
9827 cnt = be32_to_cpu(pc->pname_count);
9828
9829 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
9830 "4684 FPIN Peer Congestion %s (x%x) "
9831 "Duration %d mSecs "
9832 "Detecting PN x%016llx Attached PN x%016llx "
9833 "Impacted Port Cnt %d\n",
9834 pc_evt_str, pc_evt,
9835 be32_to_cpu(pc->event_period),
9836 be64_to_cpu(pc->detecting_wwpn),
9837 be64_to_cpu(pc->attached_wwpn),
9838 cnt);
9839
9840 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
9841 }
9842
9843 /**
9844 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
9845 * @phba: Pointer to hba object.
9846 * @tlv: Pointer to the Congestion Notification Descriptor TLV
9847 *
9848  * This function processes an FPIN Congestion Notification. The notification
9849  * may be an Alarm or a Warning. This routine feeds that data into the
9850  * driver's running congestion algorithm and also logs a message for the
9851  * FPIN. It returns 1 to indicate the message should be delivered to the
9852  * upper layer, or 0 to indicate it should not be delivered.
9853 **/
9854 static int
9855 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
9856 {
9857 struct lpfc_cgn_info *cp;
9858 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
9859 const char *cgn_evt_str;
9860 u32 cgn_evt;
9861 const char *cgn_sev_str;
9862 u32 cgn_sev;
9863 uint16_t value;
9864 u32 crc;
9865 bool nm_log = false;
9866 int rc = 1;
9867
9868 cgn_evt = be16_to_cpu(cgn->event_type);
9869 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
9870 cgn_sev = cgn->severity;
9871 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
9872
9873 /* The driver only takes action on a Credit Stall or Oversubscription
9874 * event type to engage the IO algorithm. The driver prints an
9875 * unmaskable message only for Lost Credit and Credit Stall.
9876 * TODO: Still need to have definition of host action on clear,
9877 * lost credit and device specific event types.
9878 */
9879 switch (cgn_evt) {
9880 case FPIN_CONGN_LOST_CREDIT:
9881 nm_log = true;
9882 break;
9883 case FPIN_CONGN_CREDIT_STALL:
9884 nm_log = true;
9885 fallthrough;
9886 case FPIN_CONGN_OVERSUBSCRIPTION:
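		/* Credit Stall falls through to here; only Lost Credit and
		 * Credit Stall warrant the unmaskable log message, so clear
		 * nm_log again for a pure Oversubscription event.
		 */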
9887 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
9888 nm_log = false;
9889 switch (cgn_sev) {
9890 case FPIN_CONGN_SEVERITY_ERROR:
9891 /* Take action here for an Alarm event */
9892 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
9893 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
9894 				/* Keep track of the alarm cnt for SYNC_WQE */
9895 atomic_inc(&phba->cgn_sync_alarm_cnt);
9896 }
9897 /* Track alarm cnt for cgn_info regardless
9898 * of whether CMF is configured for Signals
9899 * or FPINs.
9900 */
9901 atomic_inc(&phba->cgn_fabric_alarm_cnt);
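			/* Alarm and Warning events share the frequency and
			 * CRC bookkeeping under the cleanup label below.
			 */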
9902 goto cleanup;
9903 }
9904 break;
9905 case FPIN_CONGN_SEVERITY_WARNING:
9906 /* Take action here for a Warning event */
9907 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
9908 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
9909 				/* Keep track of the warning cnt for SYNC_WQE */
9910 atomic_inc(&phba->cgn_sync_warn_cnt);
9911 }
9912 /* Track warning cnt and freq for cgn_info
9913 * regardless of whether CMF is configured for
9914 * Signals or FPINs.
9915 */
9916 atomic_inc(&phba->cgn_fabric_warn_cnt);
9917 cleanup:
9918 /* Save frequency in ms */
9919 phba->cgn_fpin_frequency =
9920 be32_to_cpu(cgn->event_period);
9921 value = phba->cgn_fpin_frequency;
9922 if (phba->cgn_i) {
9923 cp = (struct lpfc_cgn_info *)
9924 phba->cgn_i->virt;
9925 cp->cgn_alarm_freq =
9926 cpu_to_le16(value);
9927 cp->cgn_warn_freq =
9928 cpu_to_le16(value);
9929 crc = lpfc_cgn_calc_crc32
9930 (cp,
9931 LPFC_CGN_INFO_SZ,
9932 LPFC_CGN_CRC32_SEED);
9933 cp->cgn_info_crc = cpu_to_le32(crc);
9934 }
9935
9936 /* Don't deliver to upper layer since
9937 * driver took action on this tlv.
9938 */
9939 rc = 0;
9940 }
9941 break;
9942 }
9943 break;
9944 }
9945
9946 	/* Log at an unmaskable level for Lost Credit and Credit Stall events. */
9947 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
9948 LOG_CGN_MGMT | LOG_ELS,
9949 "4683 FPIN CONGESTION %s type %s (x%x) Event "
9950 "Duration %d mSecs\n",
9951 cgn_sev_str, cgn_evt_str, cgn_evt,
9952 be32_to_cpu(cgn->event_period));
9953 return rc;
9954 }
9955
9956 void
9957 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
9958 {
9959 struct lpfc_hba *phba = vport->phba;
9960 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
9961 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
9962 const char *dtag_nm;
9963 int desc_cnt = 0, bytes_remain, cnt;
9964 u32 dtag, deliver = 0;
9965 int len;
9966
9967 /* FPINs handled only if we are in the right discovery state */
9968 if (vport->port_state < LPFC_DISC_AUTH)
9969 return;
9970
9971 	/* Make sure the full FPIN header is present */
9972 if (fpin_length < sizeof(struct fc_els_fpin))
9973 return;
9974
9975 /* Sanity check descriptor length. The desc_len value does not
9976 * include space for the ELS command and the desc_len fields.
9977 */
9978 len = be32_to_cpu(fpin->desc_len);
9979 if (fpin_length < len + sizeof(struct fc_els_fpin)) {
9980 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
9981 "4671 Bad ELS FPIN length %d: %d\n",
9982 len, fpin_length);
9983 return;
9984 }
9985
9986 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
9987 first_tlv = tlv;
9988 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
9989 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
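	/* Walk no further than both the received frame length and the
	 * descriptor list length advertised in the FPIN header.
	 */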
9990
9991 /* process each descriptor separately */
9992 while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
9993 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
9994 dtag = be32_to_cpu(tlv->desc_tag);
9995 switch (dtag) {
9996 case ELS_DTAG_LNK_INTEGRITY:
9997 lpfc_els_rcv_fpin_li(phba, tlv);
9998 deliver = 1;
9999 break;
10000 case ELS_DTAG_DELIVERY:
10001 lpfc_els_rcv_fpin_del(phba, tlv);
10002 deliver = 1;
10003 break;
10004 case ELS_DTAG_PEER_CONGEST:
10005 lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
10006 deliver = 1;
10007 break;
10008 case ELS_DTAG_CONGESTION:
10009 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
10010 break;
10011 default:
10012 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10013 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10014 "4678 unknown FPIN descriptor[%d]: "
10015 "tag x%x (%s)\n",
10016 desc_cnt, dtag, dtag_nm);
10017
10018 /* If descriptor is bad, drop the rest of the data */
10019 return;
10020 }
10021 lpfc_cgn_update_stat(phba, dtag);
10022 cnt = be32_to_cpu(tlv->desc_len);
10023
10024 /* Sanity check descriptor length. The desc_len value does not
10025 * include space for the desc_tag and the desc_len fields.
10026 */
10027 len -= (cnt + sizeof(struct fc_tlv_desc));
10028 if (len < 0) {
10029 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
10030 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
10031 "4672 Bad FPIN descriptor TLV length "
10032 "%d: %d %d %s\n",
10033 cnt, len, fpin_length, dtag_nm);
10034 return;
10035 }
10036
10037 current_tlv = tlv;
10038 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
10039 tlv = fc_tlv_next_desc(tlv);
10040
10041 /* Format payload such that the FPIN delivered to the
10042 * upper layer is a single descriptor FPIN.
10043 */
10044 if (desc_cnt)
10045 memcpy(first_tlv, current_tlv,
10046 (cnt + sizeof(struct fc_els_fpin)));
10047
10048 /* Adjust the length so that it only reflects a
10049 * single descriptor FPIN.
10050 */
10051 fpin_length = cnt + sizeof(struct fc_els_fpin);
10052 fpin->desc_len = cpu_to_be32(fpin_length);
10053 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
10054
10055 /* Send every descriptor individually to the upper layer */
10056 if (deliver)
10057 fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
10058 fpin_length, (char *)fpin);
10059 desc_cnt++;
10060 }
10061 }
10062
10063 /**
10064 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
10065 * @phba: pointer to lpfc hba data structure.
10066 * @pring: pointer to a SLI ring.
10067 * @vport: pointer to a host virtual N_Port data structure.
10068 * @elsiocb: pointer to lpfc els command iocb data structure.
10069 *
10070  * This routine is used for processing the IOCB associated with an unsolicited
10071 * event. It first determines whether there is an existing ndlp that matches
10072 * the DID from the unsolicited IOCB. If not, it will create a new one with
10073 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
10074 * IOCB is then used to invoke the proper routine and to set up proper state
10075 * of the discovery state machine.
10076 **/
10077 static void
10078 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10079 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
10080 {
10081 struct lpfc_nodelist *ndlp;
10082 struct ls_rjt stat;
10083 u32 *payload, payload_len;
10084 u32 cmd = 0, did = 0, newnode, status = 0;
10085 uint8_t rjt_exp, rjt_err = 0, init_link = 0;
10086 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10087 LPFC_MBOXQ_t *mbox;
10088
10089 if (!vport || !elsiocb->cmd_dmabuf)
10090 goto dropit;
10091
10092 newnode = 0;
10093 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10094 payload = elsiocb->cmd_dmabuf->virt;
10095 if (phba->sli_rev == LPFC_SLI_REV4)
10096 payload_len = wcqe_cmpl->total_data_placed;
10097 else
10098 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
10099 status = get_job_ulpstatus(phba, elsiocb);
10100 cmd = *payload;
10101 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
10102 lpfc_sli3_post_buffer(phba, pring, 1);
10103
10104 did = get_job_els_rsp64_did(phba, elsiocb);
10105 if (status) {
10106 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10107 "RCV Unsol ELS: status:x%x/x%x did:x%x",
10108 status, get_job_word4(phba, elsiocb), did);
10109 goto dropit;
10110 }
10111
10112 /* Check to see if link went down during discovery */
10113 if (lpfc_els_chk_latt(vport))
10114 goto dropit;
10115
10116 /* Ignore traffic received during vport shutdown. */
10117 if (vport->load_flag & FC_UNLOADING)
10118 goto dropit;
10119
10120 /* If NPort discovery is delayed drop incoming ELS */
10121 if ((vport->fc_flag & FC_DISC_DELAYED) &&
10122 (cmd != ELS_CMD_PLOGI))
10123 goto dropit;
10124
10125 ndlp = lpfc_findnode_did(vport, did);
10126 if (!ndlp) {
10127 /* Cannot find existing Fabric ndlp, so allocate a new one */
10128 ndlp = lpfc_nlp_init(vport, did);
10129 if (!ndlp)
10130 goto dropit;
10131 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10132 newnode = 1;
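		/* DIDs in the well-known fabric address range identify
		 * fabric services, so mark such nodes as fabric nodes.
		 */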
10133 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
10134 ndlp->nlp_type |= NLP_FABRIC;
10135 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
10136 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
10137 newnode = 1;
10138 }
10139
10140 phba->fc_stat.elsRcvFrame++;
10141
10142 /*
10143 * Do not process any unsolicited ELS commands
10144 * if the ndlp is in DEV_LOSS
10145 */
10146 spin_lock_irq(&ndlp->lock);
10147 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
10148 spin_unlock_irq(&ndlp->lock);
10149 if (newnode)
10150 lpfc_nlp_put(ndlp);
10151 goto dropit;
10152 }
10153 spin_unlock_irq(&ndlp->lock);
10154
10155 elsiocb->ndlp = lpfc_nlp_get(ndlp);
10156 if (!elsiocb->ndlp)
10157 goto dropit;
10158 elsiocb->vport = vport;
10159
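	/* The first word of an RSCN also encodes the payload length; mask it
	 * off so the switch below matches ELS_CMD_RSCN for any payload size.
	 */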
10160 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
10161 cmd &= ELS_CMD_MASK;
10162 }
10163 /* ELS command <elsCmd> received from NPORT <did> */
10164 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10165 "0112 ELS command x%x received from NPORT x%x "
10166 "refcnt %d Data: x%x x%x x%x x%x\n",
10167 cmd, did, kref_read(&ndlp->kref), vport->port_state,
10168 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
10169
10170 	/* Reject until our FLOGI completes or a PT2PT PLOGI assigns our DID */
10171 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
10172 (cmd != ELS_CMD_FLOGI) &&
10173 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
10174 rjt_err = LSRJT_LOGICAL_BSY;
10175 rjt_exp = LSEXP_NOTHING_MORE;
10176 goto lsrjt;
10177 }
10178
10179 switch (cmd) {
10180 case ELS_CMD_PLOGI:
10181 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10182 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
10183 did, vport->port_state, ndlp->nlp_flag);
10184
10185 phba->fc_stat.elsRcvPLOGI++;
10186 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
10187 if (phba->sli_rev == LPFC_SLI_REV4 &&
10188 (phba->pport->fc_flag & FC_PT2PT)) {
10189 vport->fc_prevDID = vport->fc_myDID;
10190 /* Our DID needs to be updated before registering
10191 * the vfi. This is done in lpfc_rcv_plogi but
10192 * that is called after the reg_vfi.
10193 */
10194 vport->fc_myDID =
10195 bf_get(els_rsp64_sid,
10196 &elsiocb->wqe.xmit_els_rsp);
10197 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
10198 "3312 Remote port assigned DID x%x "
10199 "%x\n", vport->fc_myDID,
10200 vport->fc_prevDID);
10201 }
10202
10203 lpfc_send_els_event(vport, ndlp, payload);
10204
10205 /* If Nport discovery is delayed, reject PLOGIs */
10206 if (vport->fc_flag & FC_DISC_DELAYED) {
10207 rjt_err = LSRJT_UNABLE_TPC;
10208 rjt_exp = LSEXP_NOTHING_MORE;
10209 break;
10210 }
10211
10212 if (vport->port_state < LPFC_DISC_AUTH) {
10213 if (!(phba->pport->fc_flag & FC_PT2PT) ||
10214 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
10215 rjt_err = LSRJT_UNABLE_TPC;
10216 rjt_exp = LSEXP_NOTHING_MORE;
10217 break;
10218 }
10219 }
10220
10221 spin_lock_irq(&ndlp->lock);
10222 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
10223 spin_unlock_irq(&ndlp->lock);
10224
10225 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10226 NLP_EVT_RCV_PLOGI);
10227
10228 break;
10229 case ELS_CMD_FLOGI:
10230 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10231 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
10232 did, vport->port_state, ndlp->nlp_flag);
10233
10234 phba->fc_stat.elsRcvFLOGI++;
10235
10236 		/* If the driver believes fabric discovery is done and the port is
10237 		 * ready but another FLOGI arrives, there is a discrepancy; bounce the link.
10238 */
10239 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
10240 vport->fc_flag & FC_PT2PT &&
10241 vport->rcv_flogi_cnt >= 1) {
10242 rjt_err = LSRJT_LOGICAL_BSY;
10243 rjt_exp = LSEXP_NOTHING_MORE;
10244 init_link++;
10245 goto lsrjt;
10246 }
10247
10248 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
10249 /* retain node if our response is deferred */
10250 if (phba->defer_flogi_acc_flag)
10251 break;
10252 if (newnode)
10253 lpfc_disc_state_machine(vport, ndlp, NULL,
10254 NLP_EVT_DEVICE_RM);
10255 break;
10256 case ELS_CMD_LOGO:
10257 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10258 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
10259 did, vport->port_state, ndlp->nlp_flag);
10260
10261 phba->fc_stat.elsRcvLOGO++;
10262 lpfc_send_els_event(vport, ndlp, payload);
10263 if (vport->port_state < LPFC_DISC_AUTH) {
10264 rjt_err = LSRJT_UNABLE_TPC;
10265 rjt_exp = LSEXP_NOTHING_MORE;
10266 break;
10267 }
10268 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
10269 if (newnode)
10270 lpfc_disc_state_machine(vport, ndlp, NULL,
10271 NLP_EVT_DEVICE_RM);
10272 break;
10273 case ELS_CMD_PRLO:
10274 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10275 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
10276 did, vport->port_state, ndlp->nlp_flag);
10277
10278 phba->fc_stat.elsRcvPRLO++;
10279 lpfc_send_els_event(vport, ndlp, payload);
10280 if (vport->port_state < LPFC_DISC_AUTH) {
10281 rjt_err = LSRJT_UNABLE_TPC;
10282 rjt_exp = LSEXP_NOTHING_MORE;
10283 break;
10284 }
10285 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
10286 break;
10287 case ELS_CMD_LCB:
10288 phba->fc_stat.elsRcvLCB++;
10289 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
10290 break;
10291 case ELS_CMD_RDP:
10292 phba->fc_stat.elsRcvRDP++;
10293 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
10294 break;
10295 case ELS_CMD_RSCN:
10296 phba->fc_stat.elsRcvRSCN++;
10297 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
10298 if (newnode)
10299 lpfc_disc_state_machine(vport, ndlp, NULL,
10300 NLP_EVT_DEVICE_RM);
10301 break;
10302 case ELS_CMD_ADISC:
10303 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10304 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
10305 did, vport->port_state, ndlp->nlp_flag);
10306
10307 lpfc_send_els_event(vport, ndlp, payload);
10308 phba->fc_stat.elsRcvADISC++;
10309 if (vport->port_state < LPFC_DISC_AUTH) {
10310 rjt_err = LSRJT_UNABLE_TPC;
10311 rjt_exp = LSEXP_NOTHING_MORE;
10312 break;
10313 }
10314 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10315 NLP_EVT_RCV_ADISC);
10316 break;
10317 case ELS_CMD_PDISC:
10318 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10319 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
10320 did, vport->port_state, ndlp->nlp_flag);
10321
10322 phba->fc_stat.elsRcvPDISC++;
10323 if (vport->port_state < LPFC_DISC_AUTH) {
10324 rjt_err = LSRJT_UNABLE_TPC;
10325 rjt_exp = LSEXP_NOTHING_MORE;
10326 break;
10327 }
10328 lpfc_disc_state_machine(vport, ndlp, elsiocb,
10329 NLP_EVT_RCV_PDISC);
10330 break;
10331 case ELS_CMD_FARPR:
10332 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10333 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
10334 did, vport->port_state, ndlp->nlp_flag);
10335
10336 phba->fc_stat.elsRcvFARPR++;
10337 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
10338 break;
10339 case ELS_CMD_FARP:
10340 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10341 "RCV FARP: did:x%x/ste:x%x flg:x%x",
10342 did, vport->port_state, ndlp->nlp_flag);
10343
10344 phba->fc_stat.elsRcvFARP++;
10345 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
10346 break;
10347 case ELS_CMD_FAN:
10348 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10349 "RCV FAN: did:x%x/ste:x%x flg:x%x",
10350 did, vport->port_state, ndlp->nlp_flag);
10351
10352 phba->fc_stat.elsRcvFAN++;
10353 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
10354 break;
10355 case ELS_CMD_PRLI:
10356 case ELS_CMD_NVMEPRLI:
10357 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10358 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
10359 did, vport->port_state, ndlp->nlp_flag);
10360
10361 phba->fc_stat.elsRcvPRLI++;
10362 if ((vport->port_state < LPFC_DISC_AUTH) &&
10363 (vport->fc_flag & FC_FABRIC)) {
10364 rjt_err = LSRJT_UNABLE_TPC;
10365 rjt_exp = LSEXP_NOTHING_MORE;
10366 break;
10367 }
10368 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
10369 break;
10370 case ELS_CMD_LIRR:
10371 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10372 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
10373 did, vport->port_state, ndlp->nlp_flag);
10374
10375 phba->fc_stat.elsRcvLIRR++;
10376 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
10377 if (newnode)
10378 lpfc_disc_state_machine(vport, ndlp, NULL,
10379 NLP_EVT_DEVICE_RM);
10380 break;
10381 case ELS_CMD_RLS:
10382 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10383 "RCV RLS: did:x%x/ste:x%x flg:x%x",
10384 did, vport->port_state, ndlp->nlp_flag);
10385
10386 phba->fc_stat.elsRcvRLS++;
10387 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
10388 if (newnode)
10389 lpfc_disc_state_machine(vport, ndlp, NULL,
10390 NLP_EVT_DEVICE_RM);
10391 break;
10392 case ELS_CMD_RPL:
10393 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10394 "RCV RPL: did:x%x/ste:x%x flg:x%x",
10395 did, vport->port_state, ndlp->nlp_flag);
10396
10397 phba->fc_stat.elsRcvRPL++;
10398 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
10399 if (newnode)
10400 lpfc_disc_state_machine(vport, ndlp, NULL,
10401 NLP_EVT_DEVICE_RM);
10402 break;
10403 case ELS_CMD_RNID:
10404 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10405 "RCV RNID: did:x%x/ste:x%x flg:x%x",
10406 did, vport->port_state, ndlp->nlp_flag);
10407
10408 phba->fc_stat.elsRcvRNID++;
10409 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
10410 if (newnode)
10411 lpfc_disc_state_machine(vport, ndlp, NULL,
10412 NLP_EVT_DEVICE_RM);
10413 break;
10414 case ELS_CMD_RTV:
10415 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10416 "RCV RTV: did:x%x/ste:x%x flg:x%x",
10417 did, vport->port_state, ndlp->nlp_flag);
10418 phba->fc_stat.elsRcvRTV++;
10419 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
10420 if (newnode)
10421 lpfc_disc_state_machine(vport, ndlp, NULL,
10422 NLP_EVT_DEVICE_RM);
10423 break;
10424 case ELS_CMD_RRQ:
10425 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10426 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
10427 did, vport->port_state, ndlp->nlp_flag);
10428
10429 phba->fc_stat.elsRcvRRQ++;
10430 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
10431 if (newnode)
10432 lpfc_disc_state_machine(vport, ndlp, NULL,
10433 NLP_EVT_DEVICE_RM);
10434 break;
10435 case ELS_CMD_ECHO:
10436 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10437 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
10438 did, vport->port_state, ndlp->nlp_flag);
10439
10440 phba->fc_stat.elsRcvECHO++;
10441 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
10442 if (newnode)
10443 lpfc_disc_state_machine(vport, ndlp, NULL,
10444 NLP_EVT_DEVICE_RM);
10445 break;
10446 case ELS_CMD_REC:
10447 /* receive this due to exchange closed */
10448 rjt_err = LSRJT_UNABLE_TPC;
10449 rjt_exp = LSEXP_INVALID_OX_RX;
10450 break;
10451 case ELS_CMD_FPIN:
10452 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10453 "RCV FPIN: did:x%x/ste:x%x flg:x%x",
10454 did, vport->port_state, ndlp->nlp_flag);
10455
10456 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
10457 payload_len);
10458
10459 /* There are no replies, so no rjt codes */
10460 break;
10461 case ELS_CMD_EDC:
10462 lpfc_els_rcv_edc(vport, elsiocb, ndlp);
10463 break;
10464 case ELS_CMD_RDF:
10465 phba->fc_stat.elsRcvRDF++;
10466 /* Accept RDF only from fabric controller */
10467 if (did != Fabric_Cntl_DID) {
10468 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
10469 "1115 Received RDF from invalid DID "
10470 "x%x\n", did);
10471 rjt_err = LSRJT_PROTOCOL_ERR;
10472 rjt_exp = LSEXP_NOTHING_MORE;
10473 goto lsrjt;
10474 }
10475
10476 lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
10477 break;
10478 default:
10479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
10480 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
10481 cmd, did, vport->port_state);
10482
10483 /* Unsupported ELS command, reject */
10484 rjt_err = LSRJT_CMD_UNSUPPORTED;
10485 rjt_exp = LSEXP_NOTHING_MORE;
10486
10487 /* Unknown ELS command <elsCmd> received from NPORT <did> */
10488 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10489 "0115 Unknown ELS command x%x "
10490 "received from NPORT x%x\n", cmd, did);
10491 if (newnode)
10492 lpfc_disc_state_machine(vport, ndlp, NULL,
10493 NLP_EVT_DEVICE_RM);
10494 break;
10495 }
10496
10497 lsrjt:
10498 	/* Check whether the received ELS cmd needs to be rejected with an LS_RJT */
10499 if (rjt_err) {
10500 memset(&stat, 0, sizeof(stat));
10501 stat.un.b.lsRjtRsnCode = rjt_err;
10502 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
10503 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
10504 NULL);
10505 /* Remove the reference from above for new nodes. */
10506 if (newnode)
10507 lpfc_disc_state_machine(vport, ndlp, NULL,
10508 NLP_EVT_DEVICE_RM);
10509 }
10510
10511 /* Release the reference on this elsiocb, not the ndlp. */
10512 lpfc_nlp_put(elsiocb->ndlp);
10513 elsiocb->ndlp = NULL;
10514
10515 /* Special case. Driver received an unsolicited command that
10516 	 * is unsupportable given the driver's current state. Reset the
10517 * link and start over.
10518 */
10519 if (init_link) {
10520 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10521 if (!mbox)
10522 return;
10523 lpfc_linkdown(phba);
10524 lpfc_init_link(phba, mbox,
10525 phba->cfg_topology,
10526 phba->cfg_link_speed);
10527 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
10528 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10529 mbox->vport = vport;
10530 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
10531 MBX_NOT_FINISHED)
10532 mempool_free(mbox, phba->mbox_mem_pool);
10533 }
10534
10535 return;
10536
10537 dropit:
10538 if (vport && !(vport->load_flag & FC_UNLOADING))
10539 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10540 "0111 Dropping received ELS cmd "
10541 "Data: x%x x%x x%x x%x\n",
10542 cmd, status, get_job_word4(phba, elsiocb), did);
10543
10544 phba->fc_stat.elsRcvDrop++;
10545 }
10546
10547 /**
10548 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
10549 * @phba: pointer to lpfc hba data structure.
10550 * @pring: pointer to a SLI ring.
10551 * @elsiocb: pointer to lpfc els iocb data structure.
10552 *
10553 * This routine is used to process an unsolicited event received from a SLI
10554 * (Service Level Interface) ring. The actual processing of the data buffer
10555 * associated with the unsolicited event is done by invoking the routine
10556  * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
10557 * SLI ring on which the unsolicited event was received.
10558 **/
10559 void
10560 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10561 struct lpfc_iocbq *elsiocb)
10562 {
10563 struct lpfc_vport *vport = elsiocb->vport;
10564 u32 ulp_command, status, parameter, bde_count = 0;
10565 IOCB_t *icmd;
10566 struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
10567 struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
10568 struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
10569 dma_addr_t paddr;
10570
10571 elsiocb->cmd_dmabuf = NULL;
10572 elsiocb->rsp_dmabuf = NULL;
10573 elsiocb->bpl_dmabuf = NULL;
10574
10575 wcqe_cmpl = &elsiocb->wcqe_cmpl;
10576 ulp_command = get_job_cmnd(phba, elsiocb);
10577 status = get_job_ulpstatus(phba, elsiocb);
10578 parameter = get_job_word4(phba, elsiocb);
10579 if (phba->sli_rev == LPFC_SLI_REV4)
10580 bde_count = wcqe_cmpl->word3;
10581 else
10582 bde_count = elsiocb->iocb.ulpBdeCount;
10583
10584 if (status == IOSTAT_NEED_BUFFER) {
10585 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
10586 } else if (status == IOSTAT_LOCAL_REJECT &&
10587 (parameter & IOERR_PARAM_MASK) ==
10588 IOERR_RCV_BUFFER_WAITING) {
10589 phba->fc_stat.NoRcvBuf++;
10590 /* Not enough posted buffers; Try posting more buffers */
10591 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
10592 lpfc_sli3_post_buffer(phba, pring, 0);
10593 return;
10594 }
10595
10596 if (phba->sli_rev == LPFC_SLI_REV3) {
10597 icmd = &elsiocb->iocb;
10598 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
10599 (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
10600 ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
10601 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
10602 vport = phba->pport;
10603 else
10604 vport = lpfc_find_vport_by_vpid(phba,
10605 icmd->unsli3.rcvsli3.vpi);
10606 }
10607 }
10608
10609 /* If there are no BDEs associated
10610 * with this IOCB, there is nothing to do.
10611 */
10612 if (bde_count == 0)
10613 return;
10614
10615 /* Account for SLI2 or SLI3 and later unsolicited buffering */
10616 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
10617 elsiocb->cmd_dmabuf = bdeBuf1;
10618 if (bde_count == 2)
10619 elsiocb->bpl_dmabuf = bdeBuf2;
10620 } else {
10621 icmd = &elsiocb->iocb;
10622 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
10623 icmd->un.cont64[0].addrLow);
10624 elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
10625 paddr);
10626 if (bde_count == 2) {
10627 paddr = getPaddr(icmd->un.cont64[1].addrHigh,
10628 icmd->un.cont64[1].addrLow);
10629 elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
10630 pring,
10631 paddr);
10632 }
10633 }
10634
10635 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
10636 /*
10637 * The different unsolicited event handlers would tell us
10638 	 * if they are done with the buffer by setting cmd_dmabuf to NULL.
10639 */
10640 if (elsiocb->cmd_dmabuf) {
10641 lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
10642 elsiocb->cmd_dmabuf = NULL;
10643 }
10644
10645 if (elsiocb->bpl_dmabuf) {
10646 lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
10647 elsiocb->bpl_dmabuf = NULL;
10648 }
10649
10650 }
10651
10652 static void
10653 lpfc_start_fdmi(struct lpfc_vport *vport)
10654 {
10655 struct lpfc_nodelist *ndlp;
10656
10657 /* If this is the first time, allocate an ndlp and initialize
10658 * it. Otherwise, make sure the node is enabled and then do the
10659 * login.
10660 */
10661 ndlp = lpfc_findnode_did(vport, FDMI_DID);
10662 if (!ndlp) {
10663 ndlp = lpfc_nlp_init(vport, FDMI_DID);
10664 if (ndlp) {
10665 ndlp->nlp_type |= NLP_FABRIC;
10666 } else {
10667 return;
10668 }
10669 }
10670
10671 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10672 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
10673 }
10674
10675 /**
10676 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
10677 * @phba: pointer to lpfc hba data structure.
10678 * @vport: pointer to a virtual N_Port data structure.
10679 *
10680 * This routine issues a Port Login (PLOGI) to the Name Server with
10681 * State Change Request (SCR) for a @vport. This routine will create an
10682 * ndlp for the Name Server associated to the @vport if such node does
10683 * not already exist. The PLOGI to Name Server is issued by invoking the
10684 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
10685 * (FDMI) is configured to the @vport, a FDMI node will be created and
10686 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
10687 **/
10688 void
10689 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
10690 {
10691 struct lpfc_nodelist *ndlp;
10692 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10693
10694 /*
10695 	 * If the lpfc_delay_discovery parameter is set, the clean address
10696 	 * bit is cleared, and the FC fabric parameters have changed, delay
10697 	 * FC NPort discovery.
10698 */
10699 spin_lock_irq(shost->host_lock);
10700 if (vport->fc_flag & FC_DISC_DELAYED) {
10701 spin_unlock_irq(shost->host_lock);
10702 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10703 "3334 Delay fc port discovery for %d secs\n",
10704 phba->fc_ratov);
10705 mod_timer(&vport->delayed_disc_tmo,
10706 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
10707 return;
10708 }
10709 spin_unlock_irq(shost->host_lock);
10710
10711 ndlp = lpfc_findnode_did(vport, NameServer_DID);
10712 if (!ndlp) {
10713 ndlp = lpfc_nlp_init(vport, NameServer_DID);
10714 if (!ndlp) {
10715 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10716 lpfc_disc_start(vport);
10717 return;
10718 }
10719 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10720 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10721 "0251 NameServer login: no memory\n");
10722 return;
10723 }
10724 }
10725
10726 ndlp->nlp_type |= NLP_FABRIC;
10727
10728 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
10729
10730 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
10731 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10732 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10733 "0252 Cannot issue NameServer login\n");
10734 return;
10735 }
10736
10737 if ((phba->cfg_enable_SmartSAN ||
10738 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
10739 (vport->load_flag & FC_ALLOW_FDMI))
10740 lpfc_start_fdmi(vport);
10741 }
10742
10743 /**
10744 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
10745 * @phba: pointer to lpfc hba data structure.
10746 * @pmb: pointer to the driver internal queue element for mailbox command.
10747 *
10748 * This routine is the completion callback function to register new vport
10749 * mailbox command. If the new vport mailbox command completes successfully,
10750 * the fabric registration login shall be performed on physical port (the
10751 * new vport created is actually a physical port, with VPI 0) or the port
10752 * login to Name Server for State Change Request (SCR) will be performed
10753 * on virtual port (real virtual port, with VPI greater than 0).
10754 **/
10755 static void
10756 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
10757 {
10758 struct lpfc_vport *vport = pmb->vport;
10759 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10760 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
10761 MAILBOX_t *mb = &pmb->u.mb;
10762 int rc;
10763
10764 spin_lock_irq(shost->host_lock);
10765 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
10766 spin_unlock_irq(shost->host_lock);
10767
10768 if (mb->mbxStatus) {
10769 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10770 "0915 Register VPI failed : Status: x%x"
10771 " upd bit: x%x \n", mb->mbxStatus,
10772 mb->un.varRegVpi.upd);
10773 if (phba->sli_rev == LPFC_SLI_REV4 &&
10774 mb->un.varRegVpi.upd)
10775 goto mbox_err_exit ;
10776
10777 switch (mb->mbxStatus) {
10778 case 0x11: /* unsupported feature */
10779 case 0x9603: /* max_vpi exceeded */
10780 case 0x9602: /* Link event since CLEAR_LA */
10781 /* giving up on vport registration */
10782 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10783 spin_lock_irq(shost->host_lock);
10784 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
10785 spin_unlock_irq(shost->host_lock);
10786 lpfc_can_disctmo(vport);
10787 break;
10788 		/* If reg_vpi fails with an invalid VPI status, re-init the VPI */
10789 case 0x20:
10790 spin_lock_irq(shost->host_lock);
10791 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
10792 spin_unlock_irq(shost->host_lock);
10793 lpfc_init_vpi(phba, pmb, vport->vpi);
10794 pmb->vport = vport;
10795 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
10796 rc = lpfc_sli_issue_mbox(phba, pmb,
10797 MBX_NOWAIT);
10798 if (rc == MBX_NOT_FINISHED) {
10799 lpfc_printf_vlog(vport, KERN_ERR,
10800 LOG_TRACE_EVENT,
10801 "2732 Failed to issue INIT_VPI"
10802 " mailbox command\n");
10803 } else {
10804 lpfc_nlp_put(ndlp);
10805 return;
10806 }
10807 fallthrough;
10808 default:
10809 /* Try to recover from this error */
10810 if (phba->sli_rev == LPFC_SLI_REV4)
10811 lpfc_sli4_unreg_all_rpis(vport);
10812 lpfc_mbx_unreg_vpi(vport);
10813 spin_lock_irq(shost->host_lock);
10814 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
10815 spin_unlock_irq(shost->host_lock);
10816 if (mb->mbxStatus == MBX_NOT_FINISHED)
10817 break;
10818 if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
10819 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
10820 if (phba->sli_rev == LPFC_SLI_REV4)
10821 lpfc_issue_init_vfi(vport);
10822 else
10823 lpfc_initial_flogi(vport);
10824 } else {
10825 lpfc_initial_fdisc(vport);
10826 }
10827 break;
10828 }
10829 } else {
10830 spin_lock_irq(shost->host_lock);
10831 vport->vpi_state |= LPFC_VPI_REGISTERED;
10832 spin_unlock_irq(shost->host_lock);
10833 if (vport == phba->pport) {
10834 if (phba->sli_rev < LPFC_SLI_REV4)
10835 lpfc_issue_fabric_reglogin(vport);
10836 else {
10837 /*
10838 * If the physical port is instantiated using
10839 * FDISC, do not start vport discovery.
10840 */
10841 if (vport->port_state != LPFC_FDISC)
10842 lpfc_start_fdiscs(phba);
10843 lpfc_do_scr_ns_plogi(phba, vport);
10844 }
10845 } else {
10846 lpfc_do_scr_ns_plogi(phba, vport);
10847 }
10848 }
10849 mbox_err_exit:
10850 /* Now, we decrement the ndlp reference count held for this
10851 * callback function
10852 */
10853 lpfc_nlp_put(ndlp);
10854
10855 mempool_free(pmb, phba->mbox_mem_pool);
10856 return;
10857 }
10858
10859 /**
10860  * lpfc_register_new_vport - Register a new vport with an HBA
10861 * @phba: pointer to lpfc hba data structure.
10862 * @vport: pointer to a host virtual N_Port data structure.
10863 * @ndlp: pointer to a node-list data structure.
10864 *
10865  * This routine registers the @vport as a new virtual port with an HBA.
10866 * It is done through a registering vpi mailbox command.
10867 **/
10868 void
10869 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
10870 struct lpfc_nodelist *ndlp)
10871 {
10872 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
10873 LPFC_MBOXQ_t *mbox;
10874
10875 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10876 if (mbox) {
10877 lpfc_reg_vpi(vport, mbox);
10878 mbox->vport = vport;
10879 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
10880 if (!mbox->ctx_ndlp) {
10881 mempool_free(mbox, phba->mbox_mem_pool);
10882 goto mbox_err_exit;
10883 }
10884
10885 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
10886 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
10887 == MBX_NOT_FINISHED) {
10888 			/* The mailbox command was not successful; decrement the
10889 			 * ndlp reference count taken for this command.
10890 */
10891 lpfc_nlp_put(ndlp);
10892 mempool_free(mbox, phba->mbox_mem_pool);
10893
10894 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10895 "0253 Register VPI: Can't send mbox\n");
10896 goto mbox_err_exit;
10897 }
10898 } else {
10899 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
10900 "0254 Register VPI: no memory\n");
10901 goto mbox_err_exit;
10902 }
10903 return;
10904
10905 mbox_err_exit:
10906 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
10907 spin_lock_irq(shost->host_lock);
10908 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
10909 spin_unlock_irq(shost->host_lock);
10910 return;
10911 }
10912
10913 /**
10914 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
10915 * @phba: pointer to lpfc hba data structure.
10916 *
10917  * This routine cancels the retry delay timers for all the vports.
10918 **/
10919 void
10920 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
10921 {
10922 struct lpfc_vport **vports;
10923 struct lpfc_nodelist *ndlp;
10924 uint32_t link_state;
10925 int i;
10926
10927 /* Treat this failure as linkdown for all vports */
10928 link_state = phba->link_state;
10929 lpfc_linkdown(phba);
10930 phba->link_state = link_state;
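	/* The linkdown above only flushes per-vport discovery state; the
	 * physical link itself has not changed, so restore the saved state.
	 */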
10931
10932 vports = lpfc_create_vport_work_array(phba);
10933
10934 if (vports) {
10935 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10936 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
10937 if (ndlp)
10938 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
10939 lpfc_els_flush_cmd(vports[i]);
10940 }
10941 lpfc_destroy_vport_work_array(phba, vports);
10942 }
10943 }
10944
10945 /**
10946 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
10947 * @phba: pointer to lpfc hba data structure.
10948 *
10949  * This routine aborts all pending discovery commands and
10950  * starts a timer to retry FLOGI for the physical port
10951 * discovery.
10952 **/
10953 void
10954 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
10955 {
10956 struct lpfc_nodelist *ndlp;
10957
10958 	/* Cancel all the vports' retry delay timers */
10959 lpfc_cancel_all_vport_retry_delay_timer(phba);
10960
10961 	/* If the fabric requires FLOGI, then re-instantiate the physical login */
10962 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
10963 if (!ndlp)
10964 return;
10965
10966 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
10967 spin_lock_irq(&ndlp->lock);
10968 ndlp->nlp_flag |= NLP_DELAY_TMO;
10969 spin_unlock_irq(&ndlp->lock);
10970 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
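	/* When the delay timer expires, the deferred ELS handling reissues
	 * the last ELS command recorded here, i.e. a fresh FLOGI.
	 */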
10971 phba->pport->port_state = LPFC_FLOGI;
10972 return;
10973 }
10974
10975 /**
10976 * lpfc_fabric_login_reqd - Check if FLOGI required.
10977 * @phba: pointer to lpfc hba data structure.
10978 * @cmdiocb: pointer to FDISC command iocb.
10979 * @rspiocb: pointer to FDISC response iocb.
10980 *
10981  * This routine checks if a FLOGI is required for FDISC
10982 * to succeed.
10983 **/
10984 static int
10985 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
10986 struct lpfc_iocbq *cmdiocb,
10987 struct lpfc_iocbq *rspiocb)
10988 {
10989 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
10990 u32 ulp_word4 = get_job_word4(phba, rspiocb);
10991
10992 if (ulp_status != IOSTAT_FABRIC_RJT ||
10993 ulp_word4 != RJT_LOGIN_REQUIRED)
10994 return 0;
10995 else
10996 return 1;
10997 }
10998
10999 /**
11000 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
11001 * @phba: pointer to lpfc hba data structure.
11002 * @cmdiocb: pointer to lpfc command iocb data structure.
11003 * @rspiocb: pointer to lpfc response iocb data structure.
11004 *
11005 * This routine is the completion callback function to a Fabric Discover
11006 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
11007  * single threaded, each FDISC completion callback function resets
11008  * the discovery timer for all vports so that the timers do not time out
11009  * unnecessarily. The function checks the FDISC IOCB status. If an error is
11010  * detected, the vport is set to the FC_VPORT_FAILED state. Otherwise, the
11011  * vport is set to the FC_VPORT_ACTIVE state. It then checks whether the DID
11012 * assigned to the vport has been changed with the completion of the FDISC
11013 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
11014 * are unregistered from the HBA, and then the lpfc_register_new_vport()
11015 * routine is invoked to register new vport with the HBA. Otherwise, the
11016 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
11017 * Server for State Change Request (SCR).
11018 **/
11019 static void
11020 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11021 struct lpfc_iocbq *rspiocb)
11022 {
11023 struct lpfc_vport *vport = cmdiocb->vport;
11024 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11025 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11026 struct lpfc_nodelist *np;
11027 struct lpfc_nodelist *next_np;
11028 struct lpfc_iocbq *piocb;
11029 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
11030 struct serv_parm *sp;
11031 uint8_t fabric_param_changed;
11032 u32 ulp_status, ulp_word4;
11033
11034 ulp_status = get_job_ulpstatus(phba, rspiocb);
11035 ulp_word4 = get_job_word4(phba, rspiocb);
11036
11037 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11038 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
11039 ulp_status, ulp_word4,
11040 vport->fc_prevDID);
11041 /* Since all FDISCs are being single threaded, we
11042 * must reset the discovery timer for ALL vports
11043 * waiting to send FDISC when one completes.
11044 */
11045 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
11046 lpfc_set_disctmo(piocb->vport);
11047 }
11048
11049 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11050 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
11051 ulp_status, ulp_word4, vport->fc_prevDID);
11052
11053 if (ulp_status) {
11054
11055 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
11056 lpfc_retry_pport_discovery(phba);
11057 goto out;
11058 }
11059
11060 /* Check for retry */
11061 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
11062 goto out;
11063 /* FDISC failed */
11064 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11065 "0126 FDISC failed. (x%x/x%x)\n",
11066 ulp_status, ulp_word4);
11067 goto fdisc_failed;
11068 }
11069
11070 lpfc_check_nlp_post_devloss(vport, ndlp);
11071
11072 spin_lock_irq(shost->host_lock);
11073 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
11074 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
11075 vport->fc_flag |= FC_FABRIC;
11076 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
11077 vport->fc_flag |= FC_PUBLIC_LOOP;
11078 spin_unlock_irq(shost->host_lock);
11079
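	/* Word 4 of the FDISC response carries the N_Port ID the fabric
	 * assigned to this vport.
	 */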
11080 vport->fc_myDID = ulp_word4 & Mask_DID;
11081 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
11082 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
11083 if (!prsp)
11084 goto out;
11085 sp = prsp->virt + sizeof(uint32_t);
11086 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
11087 memcpy(&vport->fabric_portname, &sp->portName,
11088 sizeof(struct lpfc_name));
11089 memcpy(&vport->fabric_nodename, &sp->nodeName,
11090 sizeof(struct lpfc_name));
11091 if (fabric_param_changed &&
11092 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
11093 /* If our NportID changed, we need to ensure all
11094 * remaining NPORTs get unreg_login'ed so we can
11095 * issue unreg_vpi.
11096 */
11097 list_for_each_entry_safe(np, next_np,
11098 &vport->fc_nodes, nlp_listp) {
11099 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
11100 !(np->nlp_flag & NLP_NPR_ADISC))
11101 continue;
11102 spin_lock_irq(&ndlp->lock);
11103 np->nlp_flag &= ~NLP_NPR_ADISC;
11104 spin_unlock_irq(&ndlp->lock);
11105 lpfc_unreg_rpi(vport, np);
11106 }
11107 lpfc_cleanup_pending_mbox(vport);
11108
11109 if (phba->sli_rev == LPFC_SLI_REV4)
11110 lpfc_sli4_unreg_all_rpis(vport);
11111
11112 lpfc_mbx_unreg_vpi(vport);
11113 spin_lock_irq(shost->host_lock);
11114 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
11115 if (phba->sli_rev == LPFC_SLI_REV4)
11116 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
11117 else
11118 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
11119 spin_unlock_irq(shost->host_lock);
11120 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
11121 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
11122 /*
11123 * Driver needs to re-reg VPI in order for f/w
11124 * to update the MAC address.
11125 */
11126 lpfc_register_new_vport(phba, vport, ndlp);
11127 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11128 goto out;
11129 }
11130
11131 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
11132 lpfc_issue_init_vpi(vport);
11133 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
11134 lpfc_register_new_vport(phba, vport, ndlp);
11135 else
11136 lpfc_do_scr_ns_plogi(phba, vport);
11137
11138 /* The FDISC completed successfully. Move the fabric ndlp to
11139 * UNMAPPED state and register with the transport.
11140 */
11141 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
11142 goto out;
11143
11144 fdisc_failed:
11145 if (vport->fc_vport &&
11146 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
11147 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11148 /* Cancel discovery timer */
11149 lpfc_can_disctmo(vport);
11150 out:
11151 lpfc_els_free_iocb(phba, cmdiocb);
11152 lpfc_nlp_put(ndlp);
11153 }
11154
11155 /**
11156 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
11157 * @vport: pointer to a virtual N_Port data structure.
11158 * @ndlp: pointer to a node-list data structure.
11159 * @retry: number of retries to the command IOCB.
11160 *
11161 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
11162 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
11163 * routine to issue the IOCB, which makes sure only one outstanding fabric
11164 * IOCB will be sent off HBA at any given time.
11165 *
11166 * Note that the ndlp reference count will be incremented by 1 for holding the
11167 * ndlp and the reference to ndlp will be stored into the ndlp field of
11168 * the IOCB for the completion callback function to the FDISC ELS command.
11169 *
11170 * Return code
11171 * 0 - Successfully issued fdisc iocb command
11172 * 1 - Failed to issue fdisc iocb command
11173 **/
11174 static int
11175 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
11176 uint8_t retry)
11177 {
11178 struct lpfc_hba *phba = vport->phba;
11179 IOCB_t *icmd;
11180 union lpfc_wqe128 *wqe = NULL;
11181 struct lpfc_iocbq *elsiocb;
11182 struct serv_parm *sp;
11183 uint8_t *pcmd;
11184 uint16_t cmdsize;
11185 int did = ndlp->nlp_DID;
11186 int rc;
11187
11188 vport->port_state = LPFC_FDISC;
11189 vport->fc_myDID = 0;
11190 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
11191 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
11192 ELS_CMD_FDISC);
11193 if (!elsiocb) {
11194 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11195 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11196 "0255 Issue FDISC: no IOCB\n");
11197 return 1;
11198 }
11199
11200 if (phba->sli_rev == LPFC_SLI_REV4) {
11201 wqe = &elsiocb->wqe;
11202 bf_set(els_req64_sid, &wqe->els_req, 0);
11203 bf_set(els_req64_sp, &wqe->els_req, 1);
11204 } else {
11205 icmd = &elsiocb->iocb;
11206 icmd->un.elsreq64.myID = 0;
11207 icmd->un.elsreq64.fl = 1;
11208 icmd->ulpCt_h = 1;
11209 icmd->ulpCt_l = 0;
11210 }
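	/* FDISC is sent with a source ID of zero; the fabric assigns the
	 * vport's new N_Port ID and returns it in the completion.
	 */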
11211
11212 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11213 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
11214 pcmd += sizeof(uint32_t); /* CSP Word 1 */
11215 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
11216 sp = (struct serv_parm *) pcmd;
11217 /* Setup CSPs accordingly for Fabric */
11218 sp->cmn.e_d_tov = 0;
11219 sp->cmn.w2.r_a_tov = 0;
11220 sp->cmn.virtual_fabric_support = 0;
11221 sp->cls1.classValid = 0;
11222 sp->cls2.seqDelivery = 1;
11223 sp->cls3.seqDelivery = 1;
11224
11225 pcmd += sizeof(uint32_t); /* CSP Word 2 */
11226 pcmd += sizeof(uint32_t); /* CSP Word 3 */
11227 pcmd += sizeof(uint32_t); /* CSP Word 4 */
11228 pcmd += sizeof(uint32_t); /* Port Name */
11229 memcpy(pcmd, &vport->fc_portname, 8);
11230 pcmd += sizeof(uint32_t); /* Node Name */
11231 pcmd += sizeof(uint32_t); /* Node Name */
11232 memcpy(pcmd, &vport->fc_nodename, 8);
11233 sp->cmn.valid_vendor_ver_level = 0;
11234 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
11235 lpfc_set_disctmo(vport);
11236
11237 phba->fc_stat.elsXmitFDISC++;
11238 elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
11239
11240 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11241 "Issue FDISC: did:x%x",
11242 did, 0, 0);
11243
11244 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11245 if (!elsiocb->ndlp)
11246 goto err_out;
11247
11248 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
11249 if (rc == IOCB_ERROR) {
11250 lpfc_nlp_put(ndlp);
11251 goto err_out;
11252 }
11253
11254 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
11255 return 0;
11256
11257 err_out:
11258 lpfc_els_free_iocb(phba, elsiocb);
11259 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
11260 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
11261 "0256 Issue FDISC: Cannot send IOCB\n");
11262 return 1;
11263 }
11264
11265 /**
11266 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
11267 * @phba: pointer to lpfc hba data structure.
11268 * @cmdiocb: pointer to lpfc command iocb data structure.
11269 * @rspiocb: pointer to lpfc response iocb data structure.
11270 *
11271 * This routine is the completion callback function to the issuing of a LOGO
11272  * ELS command off a vport. It frees the command IOCB and then decrements the
11273  * reference count held on the ndlp for this completion function, indicating
11274  * that the reference to the ndlp is no longer needed. Note that the
11275  * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
11276  * callback function and an additional explicit ndlp reference decrement
11277  * will trigger the actual release of the ndlp.
11278 **/
11279 static void
11280 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11281 struct lpfc_iocbq *rspiocb)
11282 {
11283 struct lpfc_vport *vport = cmdiocb->vport;
11284 IOCB_t *irsp;
11285 struct lpfc_nodelist *ndlp;
11286 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11287 u32 ulp_status, ulp_word4, did, tmo;
11288
11289 ndlp = cmdiocb->ndlp;
11290
11291 ulp_status = get_job_ulpstatus(phba, rspiocb);
11292 ulp_word4 = get_job_word4(phba, rspiocb);
11293
11294 if (phba->sli_rev == LPFC_SLI_REV4) {
11295 did = get_job_els_rsp64_did(phba, cmdiocb);
11296 tmo = get_wqe_tmo(cmdiocb);
11297 } else {
11298 irsp = &rspiocb->iocb;
11299 did = get_job_els_rsp64_did(phba, rspiocb);
11300 tmo = irsp->ulpTimeout;
11301 }
11302
11303 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11304 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
11305 ulp_status, ulp_word4, did);
11306
11307 /* NPIV LOGO completes to NPort <nlp_DID> */
11308 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
11309 "2928 NPIV LOGO completes to NPort x%x "
11310 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
11311 ndlp->nlp_DID, ulp_status, ulp_word4,
11312 tmo, vport->num_disc_nodes,
11313 kref_read(&ndlp->kref), ndlp->nlp_flag,
11314 ndlp->fc4_xpt_flags);
11315
11316 if (ulp_status == IOSTAT_SUCCESS) {
11317 spin_lock_irq(shost->host_lock);
11318 vport->fc_flag &= ~FC_NDISC_ACTIVE;
11319 vport->fc_flag &= ~FC_FABRIC;
11320 spin_unlock_irq(shost->host_lock);
11321 lpfc_can_disctmo(vport);
11322 }
11323
11324 if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
11325 /* Wake up lpfc_vport_delete if waiting...*/
11326 if (ndlp->logo_waitq)
11327 wake_up(ndlp->logo_waitq);
11328 spin_lock_irq(&ndlp->lock);
11329 ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
11330 ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
11331 spin_unlock_irq(&ndlp->lock);
11332 }
11333
11334 /* Safe to release resources now. */
11335 lpfc_els_free_iocb(phba, cmdiocb);
11336 lpfc_nlp_put(ndlp);
11337 }
11338
11339 /**
11340 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
11341 * @vport: pointer to a virtual N_Port data structure.
11342 * @ndlp: pointer to a node-list data structure.
11343 *
11344 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
11345 *
11346 * Note that the ndlp reference count will be incremented by 1 for holding the
11347 * ndlp and the reference to ndlp will be stored into the ndlp field of
11348 * the IOCB for the completion callback function to the LOGO ELS command.
11349 *
11350 * Return codes
11351 * 0 - Successfully issued logo off the @vport
11352 * 1 - Failed to issue logo off the @vport
11353 **/
11354 int
11355 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
11356 {
11357 int rc = 0;
11358 struct lpfc_hba *phba = vport->phba;
11359 struct lpfc_iocbq *elsiocb;
11360 uint8_t *pcmd;
11361 uint16_t cmdsize;
11362
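	/* LOGO payload: the ELS command word, the vport's N_Port ID, and
	 * the vport's WWPN.
	 */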
11363 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
11364 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
11365 ELS_CMD_LOGO);
11366 if (!elsiocb)
11367 return 1;
11368
11369 pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
11370 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
11371 pcmd += sizeof(uint32_t);
11372
11373 /* Fill in LOGO payload */
11374 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
11375 pcmd += sizeof(uint32_t);
11376 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
11377
11378 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
11379 "Issue LOGO npiv did:x%x flg:x%x",
11380 ndlp->nlp_DID, ndlp->nlp_flag, 0);
11381
11382 elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
11383 spin_lock_irq(&ndlp->lock);
11384 ndlp->nlp_flag |= NLP_LOGO_SND;
11385 spin_unlock_irq(&ndlp->lock);
11386 elsiocb->ndlp = lpfc_nlp_get(ndlp);
11387 if (!elsiocb->ndlp) {
11388 lpfc_els_free_iocb(phba, elsiocb);
11389 goto err;
11390 }
11391
11392 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
11393 if (rc == IOCB_ERROR) {
11394 lpfc_els_free_iocb(phba, elsiocb);
11395 lpfc_nlp_put(ndlp);
11396 goto err;
11397 }
11398 return 0;
11399
11400 err:
11401 spin_lock_irq(&ndlp->lock);
11402 ndlp->nlp_flag &= ~NLP_LOGO_SND;
11403 spin_unlock_irq(&ndlp->lock);
11404 return 1;
11405 }
11406
11407 /**
11408 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
11409 * @t: timer context used to obtain the lpfc hba.
11410 *
11411 * This routine is invoked by the fabric iocb block timer after
11412 * timeout. It posts the fabric iocb block timeout event by setting the
11413 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
11414 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
11415 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
11416 * WORKER_FABRIC_BLOCK_TMO event.
11417 **/
11418 void
11419 lpfc_fabric_block_timeout(struct timer_list *t)
11420 {
11421 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
11422 unsigned long iflags;
11423 uint32_t tmo_posted;
11424
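	/* Post the fabric block timeout event at most once; only the call
	 * that actually posts it wakes the worker thread.
	 */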
11425 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11426 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
11427 if (!tmo_posted)
11428 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
11429 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11430
11431 if (!tmo_posted)
11432 lpfc_worker_wake_up(phba);
11433 return;
11434 }
11435
11436 /**
11437 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
11438 * @phba: pointer to lpfc hba data structure.
11439 *
11440 * This routine issues one fabric iocb from the driver internal list to
11441 * the HBA. It first checks whether it's ready to issue one fabric iocb to
11442 * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it
11443 * removes one pending fabric iocb from the driver internal list and invokes
11444 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
11445 **/
11446 static void
11447 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
11448 {
11449 struct lpfc_iocbq *iocb;
11450 unsigned long iflags;
11451 int ret;
11452
11453 repeat:
11454 iocb = NULL;
11455 spin_lock_irqsave(&phba->hbalock, iflags);
11456 /* Post any pending iocb to the SLI layer */
11457 if (atomic_read(&phba->fabric_iocb_count) == 0) {
11458 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
11459 list);
11460 if (iocb)
11461 /* Increment fabric iocb count to hold the position */
11462 atomic_inc(&phba->fabric_iocb_count);
11463 }
11464 spin_unlock_irqrestore(&phba->hbalock, iflags);
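	/* A pending iocb was dequeued while no fabric iocb was outstanding.
	 * Hook in the fabric completion wrapper and issue it; if the issue
	 * fails locally, complete it as LOCAL_REJECT/SLI_ABORTED and try
	 * the next one.
	 */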
11465 if (iocb) {
11466 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11467 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11468 iocb->cmd_flag |= LPFC_IO_FABRIC;
11469
11470 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11471 "Fabric sched1: ste:x%x",
11472 iocb->vport->port_state, 0, 0);
11473
11474 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11475
11476 if (ret == IOCB_ERROR) {
11477 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11478 iocb->fabric_cmd_cmpl = NULL;
11479 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11480 set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
11481 iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
11482 iocb->cmd_cmpl(phba, iocb, iocb);
11483
11484 atomic_dec(&phba->fabric_iocb_count);
11485 goto repeat;
11486 }
11487 }
11488 }
11489
11490 /**
11491 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
11492 * @phba: pointer to lpfc hba data structure.
11493 *
11494 * This routine unblocks the issuing of fabric iocb commands. It clears the
11495 * fabric iocb block bit and then invokes the lpfc_resume_fabric_iocbs()
11496 * routine to issue one of the pending fabric iocbs from the driver
11497 * internal fabric iocb list.
11498 **/
11499 void
11500 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
11501 {
11502 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11503
11504 lpfc_resume_fabric_iocbs(phba);
11505 return;
11506 }
11507
11508 /**
11509 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
11510 * @phba: pointer to lpfc hba data structure.
11511 *
11512 * This routine blocks the issuing of fabric iocbs for a specified amount of
11513 * time (currently 100 ms). This is done by setting the fabric iocb block bit
11514 * and setting up a timeout timer for 100 ms. While the block bit is set, no
11515 * more fabric iocbs will be issued to the HBA.
11516 **/
11517 static void
11518 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
11519 {
11520 int blocked;
11521
11522 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11523 /* Start a timer to unblock fabric iocbs after 100ms */
11524 if (!blocked)
11525 mod_timer(&phba->fabric_block_timer,
11526 jiffies + msecs_to_jiffies(100));
11527
11528 return;
11529 }
11530
11531 /**
11532 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
11533 * @phba: pointer to lpfc hba data structure.
11534 * @cmdiocb: pointer to lpfc command iocb data structure.
11535 * @rspiocb: pointer to lpfc response iocb data structure.
11536 *
11537 * This routine is the callback function installed as the fabric iocb's
11538 * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
11539 * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
11540 * function first restores and invokes the original iocb's callback function
11541 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11542 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11543 **/
11544 static void
11545 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11546 struct lpfc_iocbq *rspiocb)
11547 {
11548 struct ls_rjt stat;
11549 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11550 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11551
11552 WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11553
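	/* Busy or temporarily-unavailable responses from the fabric throttle
	 * further fabric iocbs for a short period (see
	 * lpfc_block_fabric_iocbs).
	 */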
11554 switch (ulp_status) {
11555 case IOSTAT_NPORT_RJT:
11556 case IOSTAT_FABRIC_RJT:
11557 if (ulp_word4 & RJT_UNAVAIL_TEMP)
11558 lpfc_block_fabric_iocbs(phba);
11559 break;
11560
11561 case IOSTAT_NPORT_BSY:
11562 case IOSTAT_FABRIC_BSY:
11563 lpfc_block_fabric_iocbs(phba);
11564 break;
11565
11566 case IOSTAT_LS_RJT:
11567 stat.un.ls_rjt_error_be =
11568 cpu_to_be32(ulp_word4);
11569 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11570 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11571 lpfc_block_fabric_iocbs(phba);
11572 break;
11573 }
11574
11575 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11576
11577 cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
11578 cmdiocb->fabric_cmd_cmpl = NULL;
11579 cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
11580 cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
11581
11582 atomic_dec(&phba->fabric_iocb_count);
11583 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11584 /* Post any pending iocbs to HBA */
11585 lpfc_resume_fabric_iocbs(phba);
11586 }
11587 }
11588
11589 /**
11590 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11591 * @phba: pointer to lpfc hba data structure.
11592 * @iocb: pointer to lpfc command iocb data structure.
11593 *
11594 * This routine is used as the top-level API for issuing a fabric iocb command
11595 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11596 * function makes sure that only one fabric bound iocb will be outstanding at
11597 * any given time. As such, this function will first check to see whether there
11598 * is already an outstanding fabric iocb on the wire. If so, it will put the
11599 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11600 * issued later. Otherwise, it will issue the iocb on the wire and update the
11601 * fabric iocb count to indicate that there is one fabric iocb on the wire.
11602 *
11603 * Note, this implementation can potentially send fabric IOCBs out of
11604 * order. The problem is that the construction of the "ready" boolean does
11605 * not include the condition that the internal fabric IOCB list is empty. As
11606 * such, a fabric IOCB issued by this routine might jump ahead of the fabric
11607 * IOCBs already on the internal list.
11608 *
11609 * Return code
11610 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
11611 * IOCB_ERROR - failed to issue fabric iocb
11612 **/
11613 static int
11614 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
11615 {
11616 unsigned long iflags;
11617 int ready;
11618 int ret;
11619
11620 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
11621
11622 spin_lock_irqsave(&phba->hbalock, iflags);
11623 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
11624 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
11625
11626 if (ready)
11627 /* Increment fabric iocb count to hold the position */
11628 atomic_inc(&phba->fabric_iocb_count);
11629 spin_unlock_irqrestore(&phba->hbalock, iflags);
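	/* "ready" was sampled under hbalock: no fabric iocb outstanding and
	 * fabric commands not blocked.  The check does not consider whether
	 * the internal list is empty, which is the out-of-order window noted
	 * above.
	 */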
11630 if (ready) {
11631 iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
11632 iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
11633 iocb->cmd_flag |= LPFC_IO_FABRIC;
11634
11635 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
11636 "Fabric sched2: ste:x%x",
11637 iocb->vport->port_state, 0, 0);
11638
11639 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
11640
11641 if (ret == IOCB_ERROR) {
11642 iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
11643 iocb->fabric_cmd_cmpl = NULL;
11644 iocb->cmd_flag &= ~LPFC_IO_FABRIC;
11645 atomic_dec(&phba->fabric_iocb_count);
11646 }
11647 } else {
11648 spin_lock_irqsave(&phba->hbalock, iflags);
11649 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
11650 spin_unlock_irqrestore(&phba->hbalock, iflags);
11651 ret = IOCB_SUCCESS;
11652 }
11653 return ret;
11654 }
11655
11656 /**
11657 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
11658 * @vport: pointer to a virtual N_Port data structure.
11659 *
11660 * This routine aborts all the IOCBs associated with a @vport from the
11661 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11662 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11663 * list, removes each IOCB associated with the @vport off the list, sets the
11664 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11665 * associated with the IOCB.
11666 **/
11667 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
11668 {
11669 LIST_HEAD(completions);
11670 struct lpfc_hba *phba = vport->phba;
11671 struct lpfc_iocbq *tmp_iocb, *piocb;
11672
11673 spin_lock_irq(&phba->hbalock);
11674 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11675 list) {
11676
11677 if (piocb->vport != vport)
11678 continue;
11679
11680 list_move_tail(&piocb->list, &completions);
11681 }
11682 spin_unlock_irq(&phba->hbalock);
11683
11684 /* Cancel all the IOCBs from the completions list */
11685 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11686 IOERR_SLI_ABORTED);
11687 }
11688
11689 /**
11690 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
11691 * @ndlp: pointer to a node-list data structure.
11692 *
11693 * This routine aborts all the IOCBs associated with an @ndlp from the
11694 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
11695 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
11696 * list, removes each IOCB associated with the @ndlp off the list, sets the
11697 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
11698 * associated with the IOCB.
11699 **/
11700 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
11701 {
11702 LIST_HEAD(completions);
11703 struct lpfc_hba *phba = ndlp->phba;
11704 struct lpfc_iocbq *tmp_iocb, *piocb;
11705 struct lpfc_sli_ring *pring;
11706
11707 pring = lpfc_phba_elsring(phba);
11708
11709 if (unlikely(!pring))
11710 return;
11711
11712 spin_lock_irq(&phba->hbalock);
11713 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
11714 list) {
11715 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
11716
11717 list_move_tail(&piocb->list, &completions);
11718 }
11719 }
11720 spin_unlock_irq(&phba->hbalock);
11721
11722 /* Cancel all the IOCBs from the completions list */
11723 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11724 IOERR_SLI_ABORTED);
11725 }
11726
11727 /**
11728 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
11729 * @phba: pointer to lpfc hba data structure.
11730 *
11731 * This routine aborts all the IOCBs currently on the driver internal
11732 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
11733 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
11734 * list, sets the status field of each IOCB to
11735 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
11736 * each IOCB.
11737 **/
11738 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
11739 {
11740 LIST_HEAD(completions);
11741
11742 spin_lock_irq(&phba->hbalock);
11743 list_splice_init(&phba->fabric_iocb_list, &completions);
11744 spin_unlock_irq(&phba->hbalock);
11745
11746 /* Cancel all the IOCBs from the completions list */
11747 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11748 IOERR_SLI_ABORTED);
11749 }
11750
11751 /**
11752 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
11753 * @vport: pointer to lpfc vport data structure.
11754 *
11755 * This routine is invoked by the vport cleanup for deletions and the cleanup
11756 * for an ndlp on removal.
11757 **/
11758 void
11759 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
11760 {
11761 struct lpfc_hba *phba = vport->phba;
11762 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
11763 struct lpfc_nodelist *ndlp = NULL;
11764 unsigned long iflag = 0;
11765
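	/* Drop the ndlp reference held by each aborted-ELS sgl entry that
	 * belongs to this vport.
	 */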
11766 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
11767 list_for_each_entry_safe(sglq_entry, sglq_next,
11768 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
11769 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
11770 lpfc_nlp_put(sglq_entry->ndlp);
11771 ndlp = sglq_entry->ndlp;
11772 sglq_entry->ndlp = NULL;
11773
11774 /* If the xri on the abts_els_sgl list is for the Fport
11775 * node and the vport is unloading, the xri aborted wcqe
11776 * likely isn't coming back. Just release the sgl.
11777 */
11778 if ((vport->load_flag & FC_UNLOADING) &&
11779 ndlp->nlp_DID == Fabric_DID) {
11780 list_del(&sglq_entry->list);
11781 sglq_entry->state = SGL_FREED;
11782 list_add_tail(&sglq_entry->list,
11783 &phba->sli4_hba.lpfc_els_sgl_list);
11784 }
11785 }
11786 }
11787 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
11788 return;
11789 }
11790
11791 /**
11792 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
11793 * @phba: pointer to lpfc hba data structure.
11794 * @axri: pointer to the els xri abort wcqe structure.
11795 *
11796 * This routine is invoked by the worker thread to process a SLI4 slow-path
11797 * ELS aborted xri.
11798 **/
11799 void
11800 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
11801 struct sli4_wcqe_xri_aborted *axri)
11802 {
11803 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
11804 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
11805 uint16_t lxri = 0;
11806
11807 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
11808 unsigned long iflag = 0;
11809 struct lpfc_nodelist *ndlp;
11810 struct lpfc_sli_ring *pring;
11811
11812 pring = lpfc_phba_elsring(phba);
11813
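	/* First look for the aborted XRI on the aborted-ELS sgl list; if
	 * found, return the sgl to the free list and release the node
	 * reference.
	 */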
11814 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
11815 list_for_each_entry_safe(sglq_entry, sglq_next,
11816 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
11817 if (sglq_entry->sli4_xritag == xri) {
11818 list_del(&sglq_entry->list);
11819 ndlp = sglq_entry->ndlp;
11820 sglq_entry->ndlp = NULL;
11821 list_add_tail(&sglq_entry->list,
11822 &phba->sli4_hba.lpfc_els_sgl_list);
11823 sglq_entry->state = SGL_FREED;
11824 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
11825 iflag);
11826
11827 if (ndlp) {
11828 lpfc_set_rrq_active(phba, ndlp,
11829 sglq_entry->sli4_lxritag,
11830 rxid, 1);
11831 lpfc_nlp_put(ndlp);
11832 }
11833
11834 /* Check if TXQ queue needs to be serviced */
11835 if (pring && !list_empty(&pring->txq))
11836 lpfc_worker_wake_up(phba);
11837 return;
11838 }
11839 }
11840 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
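	/* Not on the aborted-ELS list.  If the XRI maps to an active sgl,
	 * just mark it aborted so it is handled when the I/O completes.
	 */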
11841 lxri = lpfc_sli4_xri_inrange(phba, xri);
11842 if (lxri == NO_XRI)
11843 return;
11844
11845 spin_lock_irqsave(&phba->hbalock, iflag);
11846 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
11847 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
11848 spin_unlock_irqrestore(&phba->hbalock, iflag);
11849 return;
11850 }
11851 sglq_entry->state = SGL_XRI_ABORTED;
11852 spin_unlock_irqrestore(&phba->hbalock, iflag);
11853 return;
11854 }
11855
11856 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
11857 * @vport: pointer to virtual port object.
11858 * @ndlp: nodelist pointer for the impacted node.
11859 *
11860 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
11861 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
11862 * the driver is required to send a LOGO to the remote node before it
11863 * attempts to recover its login to the remote node.
11864 */
11865 void
11866 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
11867 struct lpfc_nodelist *ndlp)
11868 {
11869 struct Scsi_Host *shost;
11870 struct lpfc_hba *phba;
11871 unsigned long flags = 0;
11872
11873 shost = lpfc_shost_from_vport(vport);
11874 phba = vport->phba;
11875 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
11876 lpfc_printf_log(phba, KERN_INFO,
11877 LOG_SLI, "3093 No rport recovery needed. "
11878 "rport in state 0x%x\n", ndlp->nlp_state);
11879 return;
11880 }
11881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11882 "3094 Start rport recovery on shost id 0x%x "
11883 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
11884 "flags 0x%x\n",
11885 shost->host_no, ndlp->nlp_DID,
11886 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
11887 ndlp->nlp_flag);
11888 /*
11889 * The rport is not responding. Remove the FCP-2 flag to prevent
11890 * an ADISC in the follow-up recovery code.
11891 */
11892 spin_lock_irqsave(&ndlp->lock, flags);
11893 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
11894 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
11895 spin_unlock_irqrestore(&ndlp->lock, flags);
11896 lpfc_unreg_rpi(vport, ndlp);
11897 }
11898
11899 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
11900 {
11901 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
11902 }
11903
11904 static void
11905 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
11906 {
11907 u32 i;
11908
11909 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
11910 return;
11911
11912 for (i = min; i <= max; i++)
11913 set_bit(i, vport->vmid_priority_range);
11914 }
11915
11916 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
11917 {
11918 set_bit(ctcl_vmid, vport->vmid_priority_range);
11919 }
11920
11921 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
11922 {
11923 u32 i;
11924
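	/* Take the lowest available CS_CTL value from the priority bitmap;
	 * a return of 0 means no value is available.
	 */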
11925 i = find_first_bit(vport->vmid_priority_range,
11926 LPFC_VMID_MAX_PRIORITY_RANGE);
11927
11928 if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
11929 return 0;
11930
11931 clear_bit(i, vport->vmid_priority_range);
11932 return i;
11933 }
11934
11935 #define MAX_PRIORITY_DESC 255
11936
11937 static void
11938 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11939 struct lpfc_iocbq *rspiocb)
11940 {
11941 struct lpfc_vport *vport = cmdiocb->vport;
11942 struct priority_range_desc *desc;
11943 struct lpfc_dmabuf *prsp = NULL;
11944 struct lpfc_vmid_priority_range *vmid_range = NULL;
11945 u32 *data;
11946 struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
11947 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11948 u32 ulp_word4 = get_job_word4(phba, rspiocb);
11949 u8 *pcmd, max_desc;
11950 u32 len, i;
11951 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
11952
11953 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
11954 if (!prsp)
11955 goto out;
11956
11957 pcmd = prsp->virt;
11958 data = (u32 *)pcmd;
11959 if (data[0] == ELS_CMD_LS_RJT) {
11960 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
11961 "3277 QFPA LS_RJT x%x x%x\n",
11962 data[0], data[1]);
11963 goto out;
11964 }
11965 if (ulp_status) {
11966 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
11967 "6529 QFPA failed with status x%x x%x\n",
11968 ulp_status, ulp_word4);
11969 goto out;
11970 }
11971
11972 if (!vport->qfpa_res) {
11973 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
11974 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
11975 GFP_KERNEL);
11976 if (!vport->qfpa_res)
11977 goto out;
11978 }
11979
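	/* QFPA response layout as parsed here: a 4-byte command code, a
	 * big-endian payload length at offset 4, and priority range
	 * descriptors starting at offset 8.
	 */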
11980 len = *((u32 *)(pcmd + 4));
11981 len = be32_to_cpu(len);
11982 memcpy(vport->qfpa_res, pcmd, len + 8);
11983 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
11984
11985 desc = (struct priority_range_desc *)(pcmd + 8);
11986 vmid_range = vport->vmid_priority.vmid_range;
11987 if (!vmid_range) {
11988 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
11989 GFP_KERNEL);
11990 if (!vmid_range) {
11991 kfree(vport->qfpa_res);
11992 goto out;
11993 }
11994 vport->vmid_priority.vmid_range = vmid_range;
11995 }
11996 vport->vmid_priority.num_descriptors = len;
11997
11998 for (i = 0; i < len; i++, vmid_range++, desc++) {
11999 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12000 "6539 vmid values low=%d, high=%d, qos=%d, "
12001 "local ve id=%d\n", desc->lo_range,
12002 desc->hi_range, desc->qos_priority,
12003 desc->local_ve_id);
12004
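		/* Each descriptor value is doubled to form the CS_CTL range;
		 * odd-only ranges start one higher and odd-only or even+odd
		 * ranges end one higher.
		 */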
12005 vmid_range->low = desc->lo_range << 1;
12006 if (desc->local_ve_id == QFPA_ODD_ONLY)
12007 vmid_range->low++;
12008 if (desc->qos_priority)
12009 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
12010 vmid_range->qos = desc->qos_priority;
12011
12012 vmid_range->high = desc->hi_range << 1;
12013 if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
12014 (desc->local_ve_id == QFPA_EVEN_ODD))
12015 vmid_range->high++;
12016 }
12017 lpfc_init_cs_ctl_bitmap(vport);
12018 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
12019 lpfc_vmid_set_cs_ctl_range(vport,
12020 vport->vmid_priority.vmid_range[i].low,
12021 vport->vmid_priority.vmid_range[i].high);
12022 }
12023
12024 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
12025 out:
12026 lpfc_els_free_iocb(phba, cmdiocb);
12027 lpfc_nlp_put(ndlp);
12028 }
12029
12030 int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
12031 {
12032 struct lpfc_hba *phba = vport->phba;
12033 struct lpfc_nodelist *ndlp;
12034 struct lpfc_iocbq *elsiocb;
12035 u8 *pcmd;
12036 int ret;
12037
12038 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
12039 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12040 return -ENXIO;
12041
12042 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
12043 ndlp->nlp_DID, ELS_CMD_QFPA);
12044 if (!elsiocb)
12045 return -ENOMEM;
12046
12047 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12048
12049 *((u32 *)(pcmd)) = ELS_CMD_QFPA;
12050 pcmd += 4;
12051
12052 elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
12053
12054 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12055 if (!elsiocb->ndlp) {
12056 lpfc_els_free_iocb(vport->phba, elsiocb);
12057 return -ENXIO;
12058 }
12059
12060 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
12061 if (ret != IOCB_SUCCESS) {
12062 lpfc_els_free_iocb(phba, elsiocb);
12063 lpfc_nlp_put(ndlp);
12064 return -EIO;
12065 }
12066 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
12067 return 0;
12068 }
12069
12070 int
12071 lpfc_vmid_uvem(struct lpfc_vport *vport,
12072 struct lpfc_vmid *vmid, bool instantiated)
12073 {
12074 struct lpfc_vem_id_desc *vem_id_desc;
12075 struct lpfc_nodelist *ndlp;
12076 struct lpfc_iocbq *elsiocb;
12077 struct instantiated_ve_desc *inst_desc;
12078 struct lpfc_vmid_context *vmid_context;
12079 u8 *pcmd;
12080 u32 *len;
12081 int ret = 0;
12082
12083 ndlp = lpfc_findnode_did(vport, Fabric_DID);
12084 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12085 return -ENXIO;
12086
12087 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
12088 if (!vmid_context)
12089 return -ENOMEM;
12090 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
12091 ndlp, Fabric_DID, ELS_CMD_UVEM);
12092 if (!elsiocb)
12093 goto out;
12094
12095 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
12096 "3427 Host vmid %s %d\n",
12097 vmid->host_vmid, instantiated);
12098 vmid_context->vmp = vmid;
12099 vmid_context->nlp = ndlp;
12100 vmid_context->instantiated = instantiated;
12101 elsiocb->vmid_tag.vmid_context = vmid_context;
12102 pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
12103
12104 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
12105 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
12106 LPFC_COMPRESS_VMID_SIZE);
12107
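	/* UVEM payload built here: the ELS command word, a payload length at
	 * offset 4, a VEM ID descriptor at offset 8, and an instantiated (or
	 * deinstantiated) VE descriptor at offset 32.
	 */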
12108 *((u32 *)(pcmd)) = ELS_CMD_UVEM;
12109 len = (u32 *)(pcmd + 4);
12110 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
12111
12112 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
12113 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
12114 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
12115 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
12116 LPFC_COMPRESS_VMID_SIZE);
12117
12118 inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
12119 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12120 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
12121 memcpy(inst_desc->global_vem_id, vmid->host_vmid,
12122 LPFC_COMPRESS_VMID_SIZE);
12123
12124 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
12125 bf_set(lpfc_instantiated_local_id, inst_desc,
12126 vmid->un.cs_ctl_vmid);
12127 if (instantiated) {
12128 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
12129 } else {
12130 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
12131 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
12132 }
12133 inst_desc->word6 = cpu_to_be32(inst_desc->word6);
12134
12135 elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
12136
12137 elsiocb->ndlp = lpfc_nlp_get(ndlp);
12138 if (!elsiocb->ndlp) {
12139 lpfc_els_free_iocb(vport->phba, elsiocb);
12140 goto out;
12141 }
12142
12143 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
12144 if (ret != IOCB_SUCCESS) {
12145 lpfc_els_free_iocb(vport->phba, elsiocb);
12146 lpfc_nlp_put(ndlp);
12147 goto out;
12148 }
12149
12150 return 0;
12151 out:
12152 kfree(vmid_context);
12153 return -EIO;
12154 }
12155
12156 static void
12157 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
12158 struct lpfc_iocbq *rspiocb)
12159 {
12160 struct lpfc_vport *vport = icmdiocb->vport;
12161 struct lpfc_dmabuf *prsp = NULL;
12162 struct lpfc_vmid_context *vmid_context =
12163 icmdiocb->vmid_tag.vmid_context;
12164 struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
12165 u8 *pcmd;
12166 u32 *data;
12167 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12168 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12169 struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
12170 struct lpfc_vmid *vmid;
12171
12172 vmid = vmid_context->vmp;
12173 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
12174 ndlp = NULL;
12175
12176 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
12177 if (!prsp)
12178 goto out;
12179 pcmd = prsp->virt;
12180 data = (u32 *)pcmd;
12181 if (data[0] == ELS_CMD_LS_RJT) {
12182 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12183 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
12184 goto out;
12185 }
12186 if (ulp_status) {
12187 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
12188 "4533 UVEM error status %x: %x\n",
12189 ulp_status, ulp_word4);
12190 goto out;
12191 }
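	/* UVEM was accepted: mark the VMID feature in use and, for an
	 * instantiation, flag this VMID entry as registered.
	 */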
12192 spin_lock(&phba->hbalock);
12193 /* Set IN USE flag */
12194 vport->vmid_flag |= LPFC_VMID_IN_USE;
12195 phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
12196 spin_unlock(&phba->hbalock);
12197
12198 if (vmid_context->instantiated) {
12199 write_lock(&vport->vmid_lock);
12200 vmid->flag |= LPFC_VMID_REGISTERED;
12201 vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
12202 write_unlock(&vport->vmid_lock);
12203 }
12204
12205 out:
12206 kfree(vmid_context);
12207 lpfc_els_free_iocb(phba, icmdiocb);
12208 lpfc_nlp_put(ndlp);
12209 }
12210