/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

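/**
 * lpfc_vport_set_state - Record a new state for a vport
 * @vport: The virtual port whose state is being changed.
 * @new_state: The new fc_vport_state to record.
 *
 * This routine records @new_state in the transport's fc_vport (saving the
 * previous state unless the port is still initializing) and maps the error
 * states onto the driver's internal port_state.
 **/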
inline void lpfc_vport_set_state(struct lpfc_vport *vport,
                                 enum fc_vport_state new_state)
{
        struct fc_vport *fc_vport = vport->fc_vport;

        if (fc_vport) {
                /*
                 * When the transport defines fc_vport_set_state we will
                 * replace this code with the following line
                 */
                /* fc_vport_set_state(fc_vport, new_state); */
                if (new_state != FC_VPORT_INITIALIZING)
                        fc_vport->vport_last_state = fc_vport->vport_state;
                fc_vport->vport_state = new_state;
        }

        /* For all the error states we will set the internal state to FAILED */
        switch (new_state) {
        case FC_VPORT_NO_FABRIC_SUPP:
        case FC_VPORT_NO_FABRIC_RSCS:
        case FC_VPORT_FABRIC_LOGOUT:
        case FC_VPORT_FABRIC_REJ_WWN:
        case FC_VPORT_FAILED:
                vport->port_state = LPFC_VPORT_FAILED;
                break;
        case FC_VPORT_LINKDOWN:
                vport->port_state = LPFC_VPORT_UNKNOWN;
                break;
        default:
                /* do nothing */
                break;
        }
}

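/**
 * lpfc_alloc_vpi - Allocate a free VPI from the HBA's bitmask
 * @phba: Pointer to the HBA context object.
 *
 * Searches the vpi_bmask under hbalock, starting at bit 1 because vpi
 * zero is reserved for the physical port. Returns the allocated vpi,
 * or 0 if none is available.
 **/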
static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
        int vpi;

        spin_lock_irq(&phba->hbalock);
        /* Start at bit 1 because vpi zero is reserved for the physical port */
        vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
        if (vpi > phba->max_vpi)
                vpi = 0;
        else
                set_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used++;
        spin_unlock_irq(&phba->hbalock);
        return vpi;
}

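/**
 * lpfc_free_vpi - Release a previously allocated VPI
 * @phba: Pointer to the HBA context object.
 * @vpi: The virtual port identifier to release; vpi 0 is ignored.
 **/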
static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
        if (vpi == 0)
                return;
        spin_lock_irq(&phba->hbalock);
        clear_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used--;
        spin_unlock_irq(&phba->hbalock);
}

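/**
 * lpfc_vport_sparm - Read the service parameters for a vport
 * @phba: Pointer to the HBA context object.
 * @vport: The virtual port to read service parameters for.
 *
 * Issues a READ_SPARAM mailbox command and copies the returned service
 * parameters, node name and port name into @vport. Returns 0 on success,
 * -ENOMEM, -EINTR or -EIO on failure.
 **/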
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        mb = &pmb->u.mb;

        rc = lpfc_read_sparam(phba, pmb, vport->vpi);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        /*
         * Grab buffer pointer and clear context1 so we can use
         * lpfc_sli_issue_mbox_wait
         */
        mp = (struct lpfc_dmabuf *) pmb->context1;
        pmb->context1 = NULL;

        pmb->vport = vport;
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
        if (rc != MBX_SUCCESS) {
                if (signal_pending(current)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
                                         "1830 Signal aborted mbxCmd x%x\n",
                                         mb->mbxCommand);
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        if (rc != MBX_TIMEOUT)
                                mempool_free(pmb, phba->mbox_mem_pool);
                        return -EINTR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
                                         "1818 VPort failed init, mbxCmd x%x "
                                         "READ_SPARM mbxStatus x%x, rc = x%x\n",
                                         mb->mbxCommand, mb->mbxStatus, rc);
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        if (rc != MBX_TIMEOUT)
                                mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof (struct lpfc_name));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof (struct lpfc_name));

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

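/**
 * lpfc_valid_wwn_format - Validate the format of a world wide name
 * @phba: Pointer to the HBA context object.
 * @wwn: The world wide name to validate.
 * @name_type: Descriptive name ("WWNN" or "WWPN") used in the error message.
 *
 * Returns 1 if the name is acceptable; logs an error and returns 0 for a
 * malformed IEEE format 1 address.
 **/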
static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
                      const char *name_type)
{
        /* ensure that IEEE format 1 addresses
         * contain zeros in bits 59-48
         */
        if (!((wwn->u.wwn[0] >> 4) == 1 &&
              ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
                return 1;

        lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                        "1822 Invalid %s: %02x:%02x:%02x:%02x:"
                        "%02x:%02x:%02x:%02x\n",
                        name_type,
                        wwn->u.wwn[0], wwn->u.wwn[1],
                        wwn->u.wwn[2], wwn->u.wwn[3],
                        wwn->u.wwn[4], wwn->u.wwn[5],
                        wwn->u.wwn[6], wwn->u.wwn[7]);
        return 0;
}

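/**
 * lpfc_unique_wwpn - Check that a new vport's WWPN is unique on the HBA
 * @phba: Pointer to the HBA context object.
 * @new_vport: The vport whose port name is being checked.
 *
 * Walks the HBA's port list under hbalock and compares port names.
 * Returns 1 if the WWPN is unique, 0 if another vport already uses it.
 **/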
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
        struct lpfc_vport *vport;
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport == new_vport)
                        continue;
                /* If they match, return not unique */
                if (memcmp(&vport->fc_sparam.portName,
                           &new_vport->fc_sparam.portName,
                           sizeof(struct lpfc_name)) == 0) {
                        spin_unlock_irqrestore(&phba->hbalock, flags);
                        return 0;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return 1;
}

/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * This driver calls this routine specifically from lpfc_vport_delete
 * to enforce a synchronous execution of vport delete relative to
 * discovery activities. The lpfc_vport_delete routine should not return
 * until it can reasonably guarantee that discovery has quiesced.
 * Post FDISC LOGO, the driver must wait until its SAN teardown is
 * complete and all resources recovered before allowing cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        uint32_t wait_flags = 0;
        unsigned long wait_time_max;
        unsigned long start_time;

        wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
                     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

        /*
         * The time constraint on this loop is a balance between the
         * fabric RA_TOV value and dev_loss tmo. The driver's
         * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
         */
        wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
        wait_time_max += jiffies;
        start_time = jiffies;
        while (time_before(jiffies, wait_time_max)) {
                if ((vport->num_disc_nodes > 0) ||
                    (vport->fc_flag & wait_flags) ||
                    ((vport->port_state > LPFC_VPORT_FAILED) &&
                     (vport->port_state < LPFC_VPORT_READY))) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1833 Vport discovery quiesce Wait:"
                                         " state x%x fc_flags x%x"
                                         " num_nodes x%x, waiting 1000 msecs"
                                         " total wait msecs x%x\n",
                                         vport->port_state, vport->fc_flag,
                                         vport->num_disc_nodes,
                                         jiffies_to_msecs(jiffies - start_time));
                        msleep(1000);
                } else {
                        /* Base case. Wait variants satisfied. Break out */
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1834 Vport discovery quiesced:"
                                         " state x%x fc_flags x%x"
                                         " wait msecs x%x\n",
                                         vport->port_state, vport->fc_flag,
                                         jiffies_to_msecs(jiffies - start_time));
                        break;
                }
        }

        if (time_after(jiffies, wait_time_max))
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                 "1835 Vport discovery quiesce failed:"
                                 " state x%x fc_flags x%x wait msecs x%x\n",
                                 vport->port_state, vport->fc_flag,
                                 jiffies_to_msecs(jiffies - start_time));
}

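/**
 * lpfc_vport_create - Create a new NPIV virtual port
 * @fc_vport: Pointer to the transport fc_vport object.
 * @disable: If true, the new vport is left in the disabled state.
 *
 * Allocates a vpi and an lpfc_vport, reads and validates the service
 * parameters and, when the fabric supports NPIV and the link is up,
 * starts discovery with an initial FDISC. Returns VPORT_OK on success
 * or a VPORT_* error code on failure.
 **/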
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost = fc_vport->shost;
        struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = pport->phba;
        struct lpfc_vport *vport = NULL;
        int instance;
        int vpi;
        int rc = VPORT_ERROR;
        int status;

        if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "1808 Create VPORT failed: "
                                "NPIV is not enabled: SLImode:%d\n",
                                phba->sli_rev);
                rc = VPORT_INVAL;
                goto error_out;
        }

        vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "1809 Create VPORT failed: "
                                "Max VPORTs (%d) exceeded\n",
                                phba->max_vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        /* Assign an unused board number */
        if ((instance = lpfc_get_instance()) < 0) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "1810 Create VPORT failed: Cannot get "
                                "instance number\n");
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport = lpfc_create_port(phba, instance, &fc_vport->dev);
        if (!vport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                "1811 Create VPORT failed: vpi x%x\n", vpi);
                lpfc_free_vpi(phba, vpi);
                rc = VPORT_NORESOURCES;
                goto error_out;
        }

        vport->vpi = vpi;
        lpfc_debugfs_initialize(vport);

        if ((status = lpfc_vport_sparm(phba, vport))) {
                if (status == -EINTR) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                         "1831 Create VPORT Interrupted.\n");
                        rc = VPORT_ERROR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                         "1813 Create VPORT failed. "
                                         "Cannot get sparam\n");
                        rc = VPORT_NORESOURCES;
                }
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                goto error_out;
        }

        u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
        u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

        memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
        memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

        if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
            !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                 "1821 Create VPORT failed. "
                                 "Invalid WWN format\n");
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        if (!lpfc_unique_wwpn(phba, vport)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                 "1823 Create VPORT failed. "
                                 "Duplicate WWN on HBA\n");
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);
                rc = VPORT_INVAL;
                goto error_out;
        }

        /* Create binary sysfs attribute for vport */
        lpfc_alloc_sysfs_attr(vport);

        *(struct lpfc_vport **)fc_vport->dd_data = vport;
        vport->fc_vport = fc_vport;

        /*
         * In SLI4, the vpi must be activated before it can be used
         * by the port.
         */
        if ((phba->sli_rev == LPFC_SLI_REV4) &&
            (pport->fc_flag & FC_VFI_REGISTERED)) {
                rc = lpfc_sli4_init_vpi(vport);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                                        "1838 Failed to INIT_VPI on vpi %d "
                                        "status %d\n", vpi, rc);
                        rc = VPORT_NORESOURCES;
                        lpfc_free_vpi(phba, vpi);
                        goto error_out;
                }
        } else if (phba->sli_rev == LPFC_SLI_REV4) {
                /*
                 * Driver cannot INIT_VPI now. Set the flag so the vpi is
                 * initialized after reg_vfi completes.
                 */
                vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if ((phba->link_state < LPFC_LINK_UP) ||
            (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
            (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                rc = VPORT_OK;
                goto out;
        }

        if (disable) {
                lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
                rc = VPORT_OK;
                goto out;
        }

        /* Use the physical port's Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
                        lpfc_set_disctmo(vport);
                        lpfc_initial_fdisc(vport);
                } else {
                        lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                         "0262 No NPIV Fabric support\n");
                }
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }
        rc = VPORT_OK;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1825 Vport Created.\n");
        lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
        return rc;
}

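/**
 * disable_vport - Take an existing vport offline
 * @fc_vport: Pointer to the transport fc_vport object.
 *
 * Sends an NPIV LOGO to the fabric if the link is up, tears down the
 * vport's remote nodes and RPIs, unregisters the vpi and marks the vport
 * FC_VPORT_DISABLED. Always returns VPORT_OK.
 **/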
static int
disable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
        long timeout;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (ndlp && NLP_CHK_NODE_ACT(ndlp)
            && phba->link_state >= LPFC_LINK_UP) {
                vport->unreg_vpi_cmpl = VPORT_INVAL;
                timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
                if (!lpfc_issue_els_npiv_logo(vport, ndlp))
                        while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
                                timeout = schedule_timeout(timeout);
        }

        lpfc_sli_host_down(vport);

        /* Mark all nodes for discovery so we can remove them by
         * calling lpfc_cleanup_rpis(vport, 1)
         */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);
        }
        lpfc_cleanup_rpis(vport, 1);

        lpfc_stop_vport_timers(vport);
        lpfc_unreg_all_rpis(vport);
        lpfc_unreg_default_rpis(vport);
        /*
         * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
         * scsi_host_put() to release the vport.
         */
        lpfc_mbx_unreg_vpi(vport);
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
        spin_unlock_irq(shost->host_lock);

        lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1826 Vport Disabled.\n");
        return VPORT_OK;
}

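/**
 * enable_vport - Bring a disabled vport back online
 * @fc_vport: Pointer to the transport fc_vport object.
 *
 * Marks the vport as needing a REG_VPI and, if the fabric supports NPIV
 * and the link is up, restarts discovery with an initial FDISC. Always
 * returns VPORT_OK.
 **/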
static int
enable_vport(struct fc_vport *fc_vport)
{
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((phba->link_state < LPFC_LINK_UP) ||
            (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                return VPORT_OK;
        }

        spin_lock_irq(shost->host_lock);
        vport->load_flag |= FC_LOADING;
        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(shost->host_lock);

        /* Use the physical port's Fabric NDLP to determine if the link is
         * up and ready to FDISC.
         */
        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && NLP_CHK_NODE_ACT(ndlp)
            && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
                        lpfc_set_disctmo(vport);
                        lpfc_initial_fdisc(vport);
                } else {
                        lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                         "0264 No NPIV Fabric support\n");
                }
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1827 Vport Enabled.\n");
        return VPORT_OK;
}

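/**
 * lpfc_vport_disable - Transport entry point to enable or disable a vport
 * @fc_vport: Pointer to the transport fc_vport object.
 * @disable: True to disable the vport, false to enable it.
 **/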
int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
        if (disable)
                return disable_vport(fc_vport);
        else
                return enable_vport(fc_vport);
}


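/**
 * lpfc_vport_delete - Delete an NPIV virtual port
 * @fc_vport: Pointer to the transport fc_vport object.
 *
 * Waits for discovery to settle, removes the FC and SCSI hosts, performs
 * a fabric LOGO and DA_ID cleanup when appropriate, releases the vpi and
 * unlinks the vport from the HBA's port list. Returns VPORT_OK on success
 * or a VPORT_* / negative error code on failure.
 **/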
int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
        struct lpfc_nodelist *ndlp = NULL;
        struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba *phba = vport->phba;
        long timeout;

        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                 "1812 vport_delete failed: Cannot delete "
                                 "physical host\n");
                return VPORT_ERROR;
        }

        /* If the vport is a static vport fail the deletion. */
        if ((vport->vport_flag & STATIC_VPORT) &&
            !(phba->pport->load_flag & FC_UNLOADING)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                 "1837 vport_delete failed: Cannot delete "
                                 "static vport.\n");
                return VPORT_ERROR;
        }
        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);
        /*
         * If we are not unloading the driver then prevent the vport_delete
         * from happening until after this vport's discovery is finished.
         */
        if (!(phba->pport->load_flag & FC_UNLOADING)) {
                int check_count = 0;
                while (check_count < ((phba->fc_ratov * 3) + 3) &&
                       vport->port_state > LPFC_VPORT_FAILED &&
                       vport->port_state < LPFC_VPORT_READY) {
                        check_count++;
                        msleep(1000);
                }
                if (vport->port_state > LPFC_VPORT_FAILED &&
                    vport->port_state < LPFC_VPORT_READY)
                        return -EAGAIN;
        }
        /*
         * This is a bit of a mess. We want to ensure the shost doesn't get
         * torn down until we're done with the embedded lpfc_vport structure.
         *
         * Beyond holding a reference for this function, we also need a
         * reference for outstanding I/O requests we schedule during delete
         * processing. But once we scsi_remove_host() we can no longer obtain
         * a reference through scsi_host_get().
         *
         * So we take two references here. We release one reference at the
         * bottom of the function -- after delinking the vport. And we
         * release the other at the completion of the unreg_vpi that gets
         * initiated after we've disposed of all other resources associated
         * with the port.
         */
        if (!scsi_host_get(shost))
                return VPORT_INVAL;
        if (!scsi_host_get(shost)) {
                scsi_host_put(shost);
                return VPORT_INVAL;
        }
        lpfc_free_sysfs_attr(vport);

        lpfc_debugfs_terminate(vport);

        /* Remove FC host and then SCSI host with the vport */
        fc_remove_host(lpfc_shost_from_vport(vport));
        scsi_remove_host(lpfc_shost_from_vport(vport));

        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);

        /* In case of driver unload, we shall not perform fabric logo as the
         * worker thread already stopped at this stage and, in this case, we
         * can safely skip the fabric logo.
         */
        if (phba->pport->load_flag & FC_UNLOADING) {
                if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
                    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
                    phba->link_state >= LPFC_LINK_UP) {
                        /* First look for the Fabric ndlp */
                        ndlp = lpfc_findnode_did(vport, Fabric_DID);
                        if (!ndlp)
                                goto skip_logo;
                        else if (!NLP_CHK_NODE_ACT(ndlp)) {
                                ndlp = lpfc_enable_node(vport, ndlp,
                                                        NLP_STE_UNUSED_NODE);
                                if (!ndlp)
                                        goto skip_logo;
                        }
                        /* Remove ndlp from vport node list */
                        lpfc_dequeue_node(vport, ndlp);

                        /* Indicate free memory when release */
                        spin_lock_irq(&phba->ndlp_lock);
                        NLP_SET_FREE_REQ(ndlp);
                        spin_unlock_irq(&phba->ndlp_lock);
                        /* Kick off release ndlp when it can be safely done */
                        lpfc_nlp_put(ndlp);
                }
                goto skip_logo;
        }

        /* Otherwise, we will perform fabric logo as needed */
        if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
            phba->link_state >= LPFC_LINK_UP &&
            phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
                if (vport->cfg_enable_da_id) {
                        timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
                        if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
                                while (vport->ct_flags && timeout)
                                        timeout = schedule_timeout(timeout);
                        else
                                lpfc_printf_log(vport->phba, KERN_WARNING,
                                                LOG_VPORT,
                                                "1829 CT command failed to "
                                                "delete objects on fabric\n");
                }
                /* First look for the Fabric ndlp */
                ndlp = lpfc_findnode_did(vport, Fabric_DID);
                if (!ndlp) {
                        /* Cannot find existing Fabric ndlp, allocate one */
                        ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
                        if (!ndlp)
                                goto skip_logo;
                        lpfc_nlp_init(vport, ndlp, Fabric_DID);
                        /* Indicate free memory when release */
                        NLP_SET_FREE_REQ(ndlp);
                } else {
                        if (!NLP_CHK_NODE_ACT(ndlp))
                                ndlp = lpfc_enable_node(vport, ndlp,
                                                        NLP_STE_UNUSED_NODE);
                        if (!ndlp)
                                goto skip_logo;

                        /* Remove ndlp from vport node list */
                        lpfc_dequeue_node(vport, ndlp);
                        spin_lock_irq(&phba->ndlp_lock);
                        if (!NLP_CHK_FREE_REQ(ndlp))
                                /* Indicate free memory when release */
                                NLP_SET_FREE_REQ(ndlp);
                        else {
                                /* Skip this if ndlp is already in free mode */
                                spin_unlock_irq(&phba->ndlp_lock);
                                goto skip_logo;
                        }
                        spin_unlock_irq(&phba->ndlp_lock);
                }
                if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
                        goto skip_logo;
                vport->unreg_vpi_cmpl = VPORT_INVAL;
                timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
                if (!lpfc_issue_els_npiv_logo(vport, ndlp))
                        while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
                                timeout = schedule_timeout(timeout);
        }

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_discovery_wait(vport);

skip_logo:
        lpfc_cleanup(vport);
        lpfc_sli_host_down(vport);

        lpfc_stop_vport_timers(vport);

        if (!(phba->pport->load_flag & FC_UNLOADING)) {
                lpfc_unreg_all_rpis(vport);
                lpfc_unreg_default_rpis(vport);
                /*
                 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
                 * does the scsi_host_put() to release the vport.
                 */
                if (lpfc_mbx_unreg_vpi(vport))
                        scsi_host_put(shost);
        } else
                scsi_host_put(shost);

        lpfc_free_vpi(phba, vport->vpi);
        vport->work_port_events = 0;
        spin_lock_irq(&phba->hbalock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->hbalock);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1828 Vport Deleted.\n");
        scsi_host_put(shost);
        return VPORT_OK;
}

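/**
 * lpfc_create_vport_work_array - Build a referenced array of active vports
 * @phba: Pointer to the HBA context object.
 *
 * Allocates a NULL-terminated array and fills it with every vport on the
 * HBA's port list whose Scsi_Host reference could be taken. The caller
 * must release the array with lpfc_destroy_vport_work_array().
 **/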
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
        struct lpfc_vport *port_iterator;
        struct lpfc_vport **vports;
        int index = 0;
        vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
                if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
                        if (!(port_iterator->load_flag & FC_UNLOADING))
                                lpfc_printf_vlog(port_iterator, KERN_ERR,
                                                 LOG_VPORT,
                                                 "1801 Create vport work array FAILED: "
                                                 "cannot do scsi_host_get\n");
                        continue;
                }
                vports[index++] = port_iterator;
        }
        spin_unlock_irq(&phba->hbalock);
        return vports;
}

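/**
 * lpfc_destroy_vport_work_array - Release a vport work array
 * @phba: Pointer to the HBA context object.
 * @vports: Array returned by lpfc_create_vport_work_array().
 *
 * Drops the Scsi_Host reference taken for each entry and frees the array.
 **/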
void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
        int i;
        if (vports == NULL)
                return;
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                scsi_host_put(lpfc_shost_from_vport(vports[i]));
        kfree(vports);
}


/**
 * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
 * @vport: Pointer to vport object.
 *
 * This function resets the statistical data for the vport. This function
 * is called with the host_lock held.
 **/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;
                if (ndlp->lat_data)
                        memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
                               sizeof(struct lpfc_scsicmd_bkt));
        }
}


/**
 * lpfc_alloc_bucket - Allocate data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function allocates the data buffers required for all the FC
 * nodes of the vport to collect statistical data.
 **/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;

                kfree(ndlp->lat_data);
                ndlp->lat_data = NULL;

                if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                        ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
                                                 sizeof(struct lpfc_scsicmd_bkt),
                                                 GFP_ATOMIC);

                        if (!ndlp->lat_data)
                                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                                 "0287 lpfc_alloc_bucket failed to "
                                                 "allocate statistical data buffer DID "
                                                 "0x%x\n", ndlp->nlp_DID);
                }
        }
}

/**
 * lpfc_free_bucket - Free data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function frees the statistical data buffers of all the FC
 * nodes of the vport.
 **/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp))
                        continue;

                kfree(ndlp->lat_data);
                ndlp->lat_data = NULL;
        }
}