/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sched/signal.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

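/**
 * lpfc_vport_set_state - Record a new transport state for a vport
 * @vport: The virtual port whose state is being updated.
 * @new_state: The fc_vport_state to transition to.
 *
 * Mirrors @new_state into the FC transport's fc_vport object, saving the
 * previous state for any transition other than FC_VPORT_INITIALIZING, and
 * maps the transport error states onto the driver's internal port_state.
 **/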
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
                                 enum fc_vport_state new_state)
{
        struct fc_vport *fc_vport = vport->fc_vport;

        if (fc_vport) {
                /*
                 * When the transport defines fc_vport_set_state() we will
                 * replace this code with the following line
                 */
                /* fc_vport_set_state(fc_vport, new_state); */
                if (new_state != FC_VPORT_INITIALIZING)
                        fc_vport->vport_last_state = fc_vport->vport_state;
                fc_vport->vport_state = new_state;
        }

        /* For all the error states we will set the internal state to FAILED */
        switch (new_state) {
        case FC_VPORT_NO_FABRIC_SUPP:
        case FC_VPORT_NO_FABRIC_RSCS:
        case FC_VPORT_FABRIC_LOGOUT:
        case FC_VPORT_FABRIC_REJ_WWN:
        case FC_VPORT_FAILED:
                vport->port_state = LPFC_VPORT_FAILED;
                break;
        case FC_VPORT_LINKDOWN:
                vport->port_state = LPFC_VPORT_UNKNOWN;
                break;
        default:
                /* do nothing */
                break;
        }
}

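/**
 * lpfc_alloc_vpi - Reserve a free VPI from the HBA's VPI bitmask
 * @phba: Pointer to the HBA context object.
 *
 * The search starts at bit 1 because VPI zero is reserved for the physical
 * port. On SLI-4 adapters the driver's vpi_used counter is incremented as
 * well.
 *
 * Return: the allocated VPI, or 0 if no VPI is available.
 **/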
int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
        unsigned long vpi;

        spin_lock_irq(&phba->hbalock);
        /* Start at bit 1 because vpi zero is reserved for the physical port */
        vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
        if (vpi > phba->max_vpi)
                vpi = 0;
        else
                set_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used++;
        spin_unlock_irq(&phba->hbalock);
        return vpi;
}

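/**
 * lpfc_free_vpi - Return a VPI to the HBA's VPI bitmask
 * @phba: Pointer to the HBA context object.
 * @vpi: The VPI to release; VPI 0 (the physical port) is never freed.
 **/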
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
        if (vpi == 0)
                return;
        spin_lock_irq(&phba->hbalock);
        clear_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used--;
        spin_unlock_irq(&phba->hbalock);
}

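/**
 * lpfc_vport_sparm - Read the service parameters for a vport
 * @phba: Pointer to the HBA context object.
 * @vport: The virtual port to read service parameters for.
 *
 * Issues a READ_SPARAM mailbox command, waits for its completion, and copies
 * the returned service parameters, node name, and port name into @vport.
 *
 * Return: 0 on success; -ENOMEM, -EINTR, or -EIO on failure.
 **/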
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        mb = &pmb->u.mb;

        rc = lpfc_read_sparam(phba, pmb, vport->vpi);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        /*
         * Wait for the READ_SPARAM mailbox to complete. The driver needs
         * this per vport to start the FDISC. If the mailbox fails, just
         * clean up and return an error unless the failure is a mailbox
         * timeout. For MBX_TIMEOUT, allow the default mbox completion
         * handler to take care of the cleanup. This is safe as the mailbox
         * command isn't one that triggers another mailbox.
         */
        pmb->vport = vport;
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
        if (rc != MBX_SUCCESS) {
                if (signal_pending(current)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1830 Signal aborted mbxCmd x%x\n",
                                         mb->mbxCommand);
                        if (rc != MBX_TIMEOUT)
                                lpfc_mbox_rsrc_cleanup(phba, pmb,
                                                       MBOX_THD_UNLOCKED);
                        return -EINTR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1818 VPort failed init, mbxCmd x%x "
                                         "READ_SPARM mbxStatus x%x, rc = x%x\n",
                                         mb->mbxCommand, mb->mbxStatus, rc);
                        if (rc != MBX_TIMEOUT)
                                lpfc_mbox_rsrc_cleanup(phba, pmb,
                                                       MBOX_THD_UNLOCKED);
                        return -EIO;
                }
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof(struct lpfc_name));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(struct lpfc_name));
        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        return 0;
}

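/**
 * lpfc_valid_wwn_format - Validate the format of a world wide name
 * @phba: Pointer to the HBA context object.
 * @wwn: The WWN to validate.
 * @name_type: String ("WWNN" or "WWPN") used in the error message.
 *
 * Rejects IEEE format 1 names (NAA nibble of 1) whose reserved high-order
 * bits are not zero, logging the offending name.
 *
 * Return: 1 if the name format is acceptable, 0 otherwise.
 **/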
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
                      const char *name_type)
{
        /* ensure that IEEE format 1 addresses
         * contain zeros in bits 59-48
         */
        if (!((wwn->u.wwn[0] >> 4) == 1 &&
              ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
                return 1;

        lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                        "1822 Invalid %s: %02x:%02x:%02x:%02x:"
                        "%02x:%02x:%02x:%02x\n",
                        name_type,
                        wwn->u.wwn[0], wwn->u.wwn[1],
                        wwn->u.wwn[2], wwn->u.wwn[3],
                        wwn->u.wwn[4], wwn->u.wwn[5],
                        wwn->u.wwn[6], wwn->u.wwn[7]);
        return 0;
}

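/**
 * lpfc_unique_wwpn - Check a new vport's WWPN against existing ports
 * @phba: Pointer to the HBA context object.
 * @new_vport: The vport whose WWPN is being validated.
 *
 * Walks the HBA's port list under port_list_lock and compares WWPNs.
 *
 * Return: 1 if the WWPN is unique on this HBA, 0 if a duplicate exists.
 **/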
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
        struct lpfc_vport *vport;
        unsigned long flags;

        spin_lock_irqsave(&phba->port_list_lock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport == new_vport)
                        continue;
                /* If they match, return not unique */
                if (memcmp(&vport->fc_sparam.portName,
                           &new_vport->fc_sparam.portName,
                           sizeof(struct lpfc_name)) == 0) {
                        spin_unlock_irqrestore(&phba->port_list_lock, flags);
                        return 0;
                }
        }
        spin_unlock_irqrestore(&phba->port_list_lock, flags);
        return 1;
}

/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * This driver calls this routine specifically from lpfc_vport_delete
 * to enforce a synchronous execution of vport delete relative to
 * discovery activities. The lpfc_vport_delete routine should not
 * return until it can reasonably guarantee that discovery has
 * quiesced. Post FDISC LOGO, the driver must wait until its SAN
 * teardown is complete and all resources recovered before allowing
 * cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        uint32_t wait_flags = 0;
        unsigned long wait_time_max;
        unsigned long start_time;

        wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
                     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

        /*
         * The time constraint on this loop is a balance between the
         * fabric RA_TOV value and dev_loss tmo. The driver's
         * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
         */
        wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
        wait_time_max += jiffies;
        start_time = jiffies;
        while (time_before(jiffies, wait_time_max)) {
                if ((vport->num_disc_nodes > 0) ||
                    (vport->fc_flag & wait_flags) ||
                    ((vport->port_state > LPFC_VPORT_FAILED) &&
                     (vport->port_state < LPFC_VPORT_READY))) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1833 Vport discovery quiesce Wait:"
                                         " state x%x fc_flags x%x"
                                         " num_nodes x%x, waiting 1000 msecs"
                                         " total wait msecs x%x\n",
                                         vport->port_state, vport->fc_flag,
                                         vport->num_disc_nodes,
                                         jiffies_to_msecs(jiffies - start_time));
                        msleep(1000);
                } else {
                        /* Base case. Wait variants satisfied. Break out */
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1834 Vport discovery quiesced:"
                                         " state x%x fc_flags x%x"
                                         " wait msecs x%x\n",
                                         vport->port_state, vport->fc_flag,
                                         jiffies_to_msecs(jiffies - start_time));
                        break;
                }
        }

        if (time_after(jiffies, wait_time_max))
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1835 Vport discovery quiesce failed:"
                                 " state x%x fc_flags x%x wait msecs x%x\n",
                                 vport->port_state, vport->fc_flag,
                                 jiffies_to_msecs(jiffies - start_time));
}

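/**
 * lpfc_vport_create - Create an NPIV virtual port
 * @fc_vport: The FC transport vport object to bind the new lpfc vport to.
 * @disable: True if the vport should be created in the disabled state.
 *
 * Allocates a VPI and an lpfc_vport, reads the service parameters, validates
 * the WWNN/WWPN, and, when the link is up and the fabric supports NPIV,
 * starts discovery with an FDISC. On SLI-4 the VPI is initialized first, or
 * deferred with FC_VPORT_NEEDS_INIT_VPI until the VFI is registered.
 *
 * Return: VPORT_OK on success; VPORT_INVAL, VPORT_NORESOURCES, or
 * VPORT_ERROR on failure.
 **/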
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost = fc_vport->shost;
        struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = pport->phba;
        struct lpfc_vport *vport = NULL;
        int instance;
        int vpi;
        int rc = VPORT_ERROR;
        int status;

        if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1808 Create VPORT failed: "
                                "NPIV is not enabled: SLImode:%d\n",
                                phba->sli_rev);
                rc = VPORT_INVAL;
                goto error_out;
        }

        /* NPIV is not supported if HBA has NVME Target enabled */
        if (phba->nvmet_support) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3189 Create VPORT failed: "
                                "NPIV is not supported on NVME Target\n");
                rc = VPORT_INVAL;
                goto error_out;
        }

        vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1809 Create VPORT failed: "
                                "Max VPORTs (%d) exceeded\n",
                                phba->max_vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        /* Assign an unused board number */
        if ((instance = lpfc_get_instance()) < 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1810 Create VPORT failed: Cannot get "
                                "instance number\n");
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport = lpfc_create_port(phba, instance, &fc_vport->dev);
        if (!vport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1811 Create VPORT failed: vpi x%x\n", vpi);
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport->vpi = vpi;
        lpfc_debugfs_initialize(vport);

        if ((status = lpfc_vport_sparm(phba, vport))) {
                if (status == -EINTR) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1831 Create VPORT Interrupted.\n");
                        rc = VPORT_ERROR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1813 Create VPORT failed. "
                                         "Cannot get sparam\n");
                        rc = VPORT_NORESOURCES;
                }
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                goto error_out;
        }

        u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
        u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

        memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
        memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

        if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
            !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1821 Create VPORT failed. "
                                 "Invalid WWN format\n");
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        if (!lpfc_unique_wwpn(phba, vport)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1823 Create VPORT failed. "
                                 "Duplicate WWN on HBA\n");
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        /* Create binary sysfs attribute for vport */
        lpfc_alloc_sysfs_attr(vport);

        /* Set the DFT_LUN_Q_DEPTH accordingly */
        vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;

        /* Only the physical port can support NVME for now */
        vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP;

        *(struct lpfc_vport **)fc_vport->dd_data = vport;
        vport->fc_vport = fc_vport;

        /* At this point we are fully registered with SCSI Layer.  */
        vport->load_flag |= FC_ALLOW_FDMI;
        if (phba->cfg_enable_SmartSAN ||
            (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
                /* Setup appropriate attribute masks */
                vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
                vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
        }

        /*
         * In SLI4, the vpi must be activated before it can be used
         * by the port.
         */
        if ((phba->sli_rev == LPFC_SLI_REV4) &&
            (pport->fc_flag & FC_VFI_REGISTERED)) {
                rc = lpfc_sli4_init_vpi(vport);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "1838 Failed to INIT_VPI on vpi %d "
                                        "status %d\n", vpi, rc);
                        rc = VPORT_NORESOURCES;
                        lpfc_free_vpi(phba, vpi);
                        goto error_out;
                }
        } else if (phba->sli_rev == LPFC_SLI_REV4) {
                /*
                 * Driver cannot INIT_VPI now. Set the flag so INIT_VPI is
                 * issued when reg_vfi completes.
                 */
                vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if ((phba->link_state < LPFC_LINK_UP) ||
            (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
            (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if (disable) {
                lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
                rc = VPORT_OK;
                goto out;
        }

        /* Use the Physical nodes Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp &&
            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
                        lpfc_set_disctmo(vport);
                        lpfc_initial_fdisc(vport);
                } else {
                        lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "0262 No NPIV Fabric support\n");
                }
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }
        rc = VPORT_OK;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1825 Vport Created.\n");
        lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
        return rc;
}

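/**
 * lpfc_send_npiv_logo - Issue a fabric LOGO for an NPIV vport and wait
 * @vport: The virtual port issuing the LOGO.
 * @ndlp: The fabric node to send the LOGO to.
 *
 * Marks the node as waiting for a LOGO completion, issues the ELS NPIV LOGO,
 * and waits up to 2 * RA_TOV for the completion to clear NLP_WAIT_FOR_LOGO.
 *
 * Return: 0 on success, -EINTR if the wait timed out, -EIO if the LOGO could
 * not be issued.
 **/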
static int
lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        int rc;
        struct lpfc_hba *phba = vport->phba;

        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

        spin_lock_irq(&ndlp->lock);
        if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
            !ndlp->logo_waitq) {
                ndlp->logo_waitq = &waitq;
                ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
                ndlp->nlp_flag |= NLP_ISSUE_LOGO;
                ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
        }
        spin_unlock_irq(&ndlp->lock);
        rc = lpfc_issue_els_npiv_logo(vport, ndlp);
        if (!rc) {
                wait_event_timeout(waitq,
                                   (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
                                   msecs_to_jiffies(phba->fc_ratov * 2000));

                if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
                        goto logo_cmpl;
                /* LOGO wait failed. Correct status. */
                rc = -EINTR;
        } else {
                rc = -EIO;
        }

        /* Error - clean up node flags. */
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
        ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
        spin_unlock_irq(&ndlp->lock);

logo_cmpl:
        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                         "1824 Issue LOGO completes with status %d\n",
                         rc);
        spin_lock_irq(&ndlp->lock);
        ndlp->logo_waitq = NULL;
        spin_unlock_irq(&ndlp->lock);
        return rc;
}

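/**
 * disable_vport - Take an NPIV vport offline
 * @fc_vport: The FC transport vport object being disabled.
 *
 * Sends a fabric LOGO when the link is up, recovers and cleans up all remote
 * port state (RPIs, timers), and unregisters the VPI, leaving the vport in
 * the FC_VPORT_DISABLED state.
 *
 * Return: VPORT_OK, or 0 if the vport is already being deleted.
 **/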
static int
disable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        /* Can't disable during an outstanding delete. */
        if (vport->load_flag & FC_UNLOADING)
                return 0;

        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (ndlp && phba->link_state >= LPFC_LINK_UP)
                (void)lpfc_send_npiv_logo(vport, ndlp);

        lpfc_sli_host_down(vport);

        /* Mark all nodes for discovery so we can remove them by
         * calling lpfc_cleanup_rpis(vport, 1)
         */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);
        }
        lpfc_cleanup_rpis(vport, 1);

        lpfc_stop_vport_timers(vport);
        lpfc_unreg_all_rpis(vport);
        lpfc_unreg_default_rpis(vport);
        /*
         * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
         * scsi_host_put() to release the vport.
         */
        lpfc_mbx_unreg_vpi(vport);
        if (phba->sli_rev == LPFC_SLI_REV4) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                spin_unlock_irq(shost->host_lock);
        }

        lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1826 Vport Disabled.\n");
        return VPORT_OK;
}

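/**
 * enable_vport - Bring a disabled NPIV vport back online
 * @fc_vport: The FC transport vport object being enabled.
 *
 * Re-initializes the VPI if needed and, when the fabric supports NPIV,
 * restarts discovery with an FDISC. If the link is down or in loop topology,
 * the vport is simply marked FC_VPORT_LINKDOWN.
 *
 * Return: VPORT_OK.
 **/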
static int
enable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((phba->link_state < LPFC_LINK_UP) ||
            (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                return VPORT_OK;
        }

        spin_lock_irq(shost->host_lock);
        vport->load_flag |= FC_LOADING;
        if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
                spin_unlock_irq(shost->host_lock);
                lpfc_issue_init_vpi(vport);
                goto out;
        }

        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(shost->host_lock);

        /* Use the Physical nodes Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
                        lpfc_set_disctmo(vport);
                        lpfc_initial_fdisc(vport);
                } else {
                        lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "0264 No NPIV Fabric support\n");
                }
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1827 Vport Enabled.\n");
        return VPORT_OK;
}

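/**
 * lpfc_vport_disable - Enable or disable an NPIV vport
 * @fc_vport: The FC transport vport object.
 * @disable: True to disable the vport, false to enable it.
 *
 * Return: the result of disable_vport() or enable_vport().
 **/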
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
        if (disable)
                return disable_vport(fc_vport);
        else
                return enable_vport(fc_vport);
}

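/**
 * lpfc_vport_delete - Delete an NPIV virtual port
 * @fc_vport: The FC transport vport object being deleted.
 *
 * Refuses to delete the physical port or a static vport, waits for any
 * in-progress discovery to settle, removes the SCSI host, sends a DA_ID and
 * a fabric LOGO to clean up name server and fabric state, and then releases
 * the VPI and the vport itself.
 *
 * Return: VPORT_OK on success; VPORT_ERROR, VPORT_INVAL, or -EAGAIN on
 * failure.
 **/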
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
        struct lpfc_nodelist *ndlp = NULL;
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba *phba = vport->phba;
        int rc;

        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1812 vport_delete failed: Cannot delete "
                                 "physical host\n");
                return VPORT_ERROR;
        }

        /* If the vport is a static vport fail the deletion. */
        if ((vport->vport_flag & STATIC_VPORT) &&
            !(phba->pport->load_flag & FC_UNLOADING)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "1837 vport_delete failed: Cannot delete "
                                 "static vport.\n");
                return VPORT_ERROR;
        }

        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);

        /*
         * If we are not unloading the driver then prevent the vport_delete
         * from happening until after this vport's discovery is finished.
         */
        if (!(phba->pport->load_flag & FC_UNLOADING)) {
                int check_count = 0;

                while (check_count < ((phba->fc_ratov * 3) + 3) &&
                       vport->port_state > LPFC_VPORT_FAILED &&
                       vport->port_state < LPFC_VPORT_READY) {
                        check_count++;
                        msleep(1000);
                }
                if (vport->port_state > LPFC_VPORT_FAILED &&
                    vport->port_state < LPFC_VPORT_READY)
                        return -EAGAIN;
        }

        /*
         * Take an early refcount for outstanding I/O requests we schedule
         * during delete processing for unreg_vpi. Always keep this before
         * scsi_remove_host() as we can no longer obtain a reference through
         * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL.
         */
        if (!scsi_host_get(shost))
                return VPORT_INVAL;

        lpfc_free_sysfs_attr(vport);
        lpfc_debugfs_terminate(vport);

        /* Remove FC host to break driver binding. */
        fc_remove_host(shost);
        scsi_remove_host(shost);

        /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */
        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (!ndlp)
                goto skip_logo;

        if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
            phba->link_state >= LPFC_LINK_UP &&
            phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
                if (vport->cfg_enable_da_id) {
                        /* Send DA_ID and wait for a completion. */
                        rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
                        if (rc) {
                                lpfc_printf_log(vport->phba, KERN_WARNING,
                                                LOG_VPORT,
                                                "1829 CT command failed to "
                                                "delete objects on fabric, "
                                                "rc %d\n", rc);
                        }
                }

                /*
                 * If the vpi is not registered, then a valid FDISC doesn't
                 * exist and there is no need for an ELS LOGO. Just cleanup
                 * the ndlp.
                 */
                if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
                        goto skip_logo;

                /* Issue a Fabric LOGO to cleanup fabric resources. */
                ndlp = lpfc_findnode_did(vport, Fabric_DID);
                if (!ndlp)
                        goto skip_logo;

                rc = lpfc_send_npiv_logo(vport, ndlp);
                if (rc)
                        goto skip_logo;
        }

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_discovery_wait(vport);

skip_logo:

        lpfc_cleanup(vport);

        /* Remove scsi host now. The nodes are cleaned up. */
        lpfc_sli_host_down(vport);
        lpfc_stop_vport_timers(vport);

        if (!(phba->pport->load_flag & FC_UNLOADING)) {
                lpfc_unreg_all_rpis(vport);
                lpfc_unreg_default_rpis(vport);
                /*
                 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
                 * does the scsi_host_put() to release the vport.
                 */
                if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
                    lpfc_mbx_unreg_vpi(vport))
                        scsi_host_put(shost);
        } else {
                scsi_host_put(shost);
        }

        lpfc_free_vpi(phba, vport->vpi);
        vport->work_port_events = 0;
        spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->port_list_lock);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1828 Vport Deleted.\n");
        scsi_host_put(shost);
        return VPORT_OK;
}

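/**
 * lpfc_create_vport_work_array - Snapshot the HBA's active vports
 * @phba: Pointer to the HBA context object.
 *
 * Allocates a zero-filled array of vport pointers and takes a SCSI host
 * reference on each vport that is not unloading, so callers can iterate the
 * vports without holding port_list_lock. The array must be released with
 * lpfc_destroy_vport_work_array().
 *
 * Return: the array, or NULL on allocation failure.
 **/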
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
        struct lpfc_vport *port_iterator;
        struct lpfc_vport **vports;
        int index = 0;

        vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
        spin_lock_irq(&phba->port_list_lock);
        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
                if (port_iterator->load_flag & FC_UNLOADING)
                        continue;
                if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
                        lpfc_printf_vlog(port_iterator, KERN_ERR,
                                         LOG_TRACE_EVENT,
                                         "1801 Create vport work array FAILED: "
                                         "cannot do scsi_host_get\n");
                        continue;
                }
                vports[index++] = port_iterator;
        }
        spin_unlock_irq(&phba->port_list_lock);
        return vports;
}

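/**
 * lpfc_destroy_vport_work_array - Release a vport work array
 * @phba: Pointer to the HBA context object.
 * @vports: Array obtained from lpfc_create_vport_work_array().
 *
 * Drops the SCSI host reference taken on each entry and frees the array.
 **/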
void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
        int i;

        if (vports == NULL)
                return;
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                scsi_host_put(lpfc_shost_from_vport(vports[i]));
        kfree(vports);
}

/**
 * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
 * @vport: Pointer to vport object.
 *
 * This function resets the statistical data for the vport. This function
 * is called with the host_lock held.
 **/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->lat_data)
                        memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
                               sizeof(struct lpfc_scsicmd_bkt));
        }
}


/**
 * lpfc_alloc_bucket - Allocate data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function allocates the data buffers required for all the FC
 * nodes of the vport to collect statistical data.
 **/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {

                kfree(ndlp->lat_data);
                ndlp->lat_data = NULL;

                if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                        ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
                                                 sizeof(struct lpfc_scsicmd_bkt),
                                                 GFP_ATOMIC);

                        if (!ndlp->lat_data)
                                lpfc_printf_vlog(vport, KERN_ERR,
                                        LOG_TRACE_EVENT,
                                        "0287 lpfc_alloc_bucket failed to "
                                        "allocate statistical data buffer DID "
                                        "0x%x\n", ndlp->nlp_DID);
                }
        }
}


/**
 * lpfc_free_bucket - Free data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * The function frees the statistical data buffers of all the FC
 * nodes of the vport.
 **/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {

                kfree(ndlp->lat_data);
                ndlp->lat_data = NULL;
        }
}