/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 *  bfad.c Linux driver PCI interface module.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>
#include <asm/fcntl.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_defs.h"
#include "bfa.h"

BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);

static int	bfad_inst;
static int	num_sgpgs_parm;
int		supported_fc4s;
char		*host_name, *os_name, *os_patch;
int		num_rports, num_ios, num_tms;
int		num_fcxps, num_ufbufs;
int		reqq_size, rspq_size, num_sgpgs;
int		rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int		bfa_io_max_sge = BFAD_IO_MAX_SGE;
int		bfa_log_level = 3; /* WARNING log level */
int		ioc_auto_recover = BFA_TRUE;
int		bfa_linkup_delay = -1;
int		fdmi_enable = BFA_TRUE;
int		pcie_max_read_reqsz;
int		bfa_debugfs_enable = 1;
int		msix_disable_cb = 0, msix_disable_ct = 0;

u32	bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
u32	*bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;

static const char *msix_name_ct[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"ctrl" };

static const char *msix_name_cb[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"eemc", "elpu0", "elpu1", "epss", "mlpu" };

MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);

module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
				"(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms,
		"Max number of task management (TM) requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
				"buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
				"default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
				"default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
					"Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
				"Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
				"Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
			"[RHEL5, SLES10, ESX40] Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
			"for Brocade-415/425/815/825 cards, default=0, "
			"Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
			"if possible for Brocade-1010/1020/804/1007/902/1741 "
			"cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
				"Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
		"(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1, "
		"Range[false:0|true:1]");
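
/*
 * Example (illustrative values only): most of the tunables above can be
 * set when the module is loaded, e.g.
 *
 *	modprobe bfa num_sgpgs=4096 bfa_log_level=4 msix_disable_ct=1
 *
 * (this assumes the driver is built as the "bfa" module).
 */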

static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
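
/*
 * Summary of the driver instance state machine implemented below:
 *
 *	uninit	     --CREATE-->	created
 *	created	     --INIT-->		initializing
 *	initializing --INIT_SUCCESS-->	operational
 *	initializing --INIT_FAILED-->	failed
 *	failed	     --INIT_SUCCESS-->	operational
 *	operational  --STOP-->		fcs_exit
 *	fcs_exit     --FCS_EXIT_COMP--> stopping
 *	stopping     --EXIT_COMP-->	uninit
 */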

/*
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable interrupts and wait for bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up an interrupt handler for each vector */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;
	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		bfad_stop(bfad);
		break;

	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_STOP:
		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
		bfad_fcs_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_FCS_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_stopping);
		bfad_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
		break;
	}
}

/*
 *  BFA callbacks
 */
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

	fcomp->status = status;
	complete(&fcomp->comp);
}

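/*
 * Typical caller pattern (illustrative sketch, not code from this file):
 *
 *	struct bfad_hal_comp fcomp;
 *
 *	init_completion(&fcomp.comp);
 *	... issue a HAL request that completes via bfad_hcb_comp(&fcomp) ...
 *	wait_for_completion(&fcomp.comp);
 *	... fcomp.status now holds the bfa_status_t result ...
 */
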
/*
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s	      *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If the BFAD_HAL_INIT_FAIL flag is set, wake up the
		 * kernel thread so it can start the bfad operations
		 * now that HAL init is done.
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	complete(&bfad->comp);
}

/*
 *  BFA_FCS callbacks
 */
struct bfad_port_s *
bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
		 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t	rc;
	struct bfad_port_s    *port_drv;

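	/*
	 * Pick the driver port object and its type from the
	 * (vf_drv, vp_drv) combination handled below:
	 *   neither set -> physical base port
	 *   vf only     -> VF base port
	 *   vp only     -> vport on the physical base port
	 *   both        -> vport on a VF
	 */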
	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;

	if (roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_port_new(bfad, port_drv);
		if (rc != BFA_STATUS_OK) {
			bfad_im_port_delete(bfad, port_drv);
			port_drv = NULL;
		}
	}

	return port_drv;
}

void
bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s    *port_drv;

	/* This will only be called from the rmmod context */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
				((vf_drv) ? (&(vf_drv)->base_port) :
				(&(bfad)->pport));
		bfa_trc(bfad, roles);
		if (roles & BFA_LPORT_ROLE_FCP_IM)
			bfad_im_port_delete(bfad, port_drv);
	}
}

/*
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t	rc = BFA_STATUS_OK;

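	/*
	 * GFP_ATOMIC: this FCS callback may run with bfad_lock held
	 * (from interrupt completion processing), so a sleeping
	 * allocation would not be safe here.
	 */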
	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	*rport = &(*rport_drv)->fcs_rport;

ext:
	return rc;
}

/*
 * FCS PBC VPORT Create
 */
void
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
{
	struct bfa_lport_cfg_s port_cfg = {0};
	struct bfad_vport_s   *vport;
	int rc;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		bfa_trc(bfad, 0);
		return;
	}

	vport->drv_port.bfad = bfad;
	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
	port_cfg.pwwn = pbc_vport.vp_pwwn;
	port_cfg.nwwn = pbc_vport.vp_nwwn;
	port_cfg.preboot_vp = BFA_TRUE;

	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
				  &port_cfg, vport);

	if (rc != BFA_STATUS_OK) {
		bfa_trc(bfad, 0);
		return;
	}

	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int		i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
					meminfo_elem->mem_len,
					meminfo_elem->kva,
					(dma_addr_t) meminfo_elem->dma);
				break;
			default:
				WARN_ON(1);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}

void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * Propagate the HAL values back to the driver module parameters
	 * for sysfs use; otherwise the defaults would show up as 0 in
	 * sysfs.
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}

bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	int		i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	dma_addr_t	phys_addr;
	void	       *kva;
	bfa_status_t	rc = BFA_STATUS_OK;
	int retry_count = 0;
	int reset_value = 1;
	int min_num_sgpgs = 512;

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
				meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with the default
				 * num_sgpgs, retry with a smaller value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO
					"bfad[%d]: memory allocation failed"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
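					/*
					 * nextLowerInt() (from the driver
					 * headers) reduces num_sgpgs so the
					 * retry asks for less DMA memory.
					 */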
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO
					"bfad[%d]: trying to allocate memory"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;
		}
	}
ext:
	return rc;
}

/*
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s   *vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}

void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s	      *bfad = (struct bfad_s *) data;
	unsigned long	flags;
	struct list_head	       doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_beat(&bfad->bfa.timer_mod);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

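/*
 * Arm the periodic HAL heartbeat.  bfad_bfa_tmo() above re-arms the
 * timer itself, so it only needs to be started once here.
 */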
void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	int		rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));

	if (bfad->pci_bar0_kva == NULL) {
		printk(KERN_ERR "Failed to map BAR0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;

	/* Adjust PCIe Maximum Read Request Size */
	if (pcie_max_read_reqsz > 0) {
		int pcie_cap_reg;
		u16 pcie_dev_ctl;
		u16 mask = 0xffff;

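		/*
		 * The max read request size lives in bits [14:12] of the
		 * PCIe Device Control register and is encoded as
		 * 128 << value, so 0 -> 128 bytes and 5 -> 4096 bytes.
		 */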
		switch (pcie_max_read_reqsz) {
		case 128:
			mask = 0x0;
			break;
		case 256:
			mask = 0x1000;
			break;
		case 512:
			mask = 0x2000;
			break;
		case 1024:
			mask = 0x3000;
			break;
		case 2048:
			mask = 0x4000;
			break;
		case 4096:
			mask = 0x5000;
			break;
		default:
			break;
		}

		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (mask != 0xffff && pcie_cap_reg) {
			pcie_cap_reg += 0x08;
			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
			if ((pcie_dev_ctl & 0x7000) != mask) {
				printk(KERN_WARNING "BFA[%s]: "
				"pcie_max_read_request_size is %d, "
				"reset to %d\n", bfad->pci_name,
				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
				pcie_max_read_reqsz);

				pcie_dev_ctl &= ~0x7000;
				pci_write_config_word(pdev, pcie_cap_reg,
						pcie_dev_ctl | mask);
			}
		}
	}

	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}

void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t	rc;
	unsigned long	flags;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports, %s",
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfad->bfa.trcmod = bfad->trcmod;
	bfad->bfa.plog = &bfad->plog_buf;
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfad->bfa_fcs.trcmod = bfad->trcmod;
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	return BFA_STATUS_OK;

out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}

void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}

void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_iocfc_start(&bfad->bfa);
	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}

void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}

void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}

bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
{
	int		rc = BFA_STATUS_OK;

	/* Allocate scsi_host for the physical port */
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (role & BFA_LPORT_ROLE_FCP_IM)) {
		if (bfad->pport.im_port == NULL) {
			rc = BFA_STATUS_FAILED;
			goto out;
		}

		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
						&bfad->pcidev->dev);
		if (rc != BFA_STATUS_OK)
			goto out;

		bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
	}

	/* Set up the debugfs node for this scsi_host */
	if (bfa_debugfs_enable)
		bfad_debugfs_init(&bfad->pport);

	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
	return rc;
}

void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	/* Remove the debugfs node for this scsi_host */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);

	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}

bfa_status_t
bfad_start_ops(struct bfad_s *bfad)
{
	int	retval;
	unsigned long	flags;
	struct bfad_vport_s *vport, *vport_new;
	struct bfa_fcs_driver_info_s driver_info;

	/* Fill in the driver_info to pass to the FCS */
	memset(&driver_info, 0, sizeof(driver_info));
	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
		sizeof(driver_info.version) - 1);
	if (host_name)
		strncpy(driver_info.host_machine_name, host_name,
			sizeof(driver_info.host_machine_name) - 1);
	if (os_name)
		strncpy(driver_info.host_os_name, os_name,
			sizeof(driver_info.host_os_name) - 1);
	if (os_patch)
		strncpy(driver_info.host_os_patch, os_patch,
			sizeof(driver_info.host_os_patch) - 1);

	strncpy(driver_info.os_device_name, bfad->pci_name,
		sizeof(driver_info.os_device_name) - 1);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (retval != BFA_STATUS_OK) {
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		bfad_stop(bfad);
		return BFA_STATUS_FAILED;
	}

	/* BFAD level FC4 IM specific resource allocation */
	retval = bfad_im_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_im_probe failed\n");
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		bfad_stop(bfad);
		return BFA_STATUS_FAILED;
	} else
		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;

	bfad_drv_start(bfad);

	/* Complete pbc vport create */
	list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
				list_entry) {
		struct fc_vport_identifiers vid;
		struct fc_vport *fc_vport;
		char pwwn_buf[BFA_STRING_32];

		memset(&vid, 0, sizeof(vid));
		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vid.vport_type = FC_PORTTYPE_NPIV;
		vid.disable = false;
		vid.node_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.nwwn)));
		vid.port_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.pwwn)));
		fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
		if (!fc_vport) {
			wwn2str(pwwn_buf, vid.port_name);
			printk(KERN_WARNING "bfad%d: failed to create pbc vport"
				" %s\n", bfad->inst_no, pwwn_buf);
		}
		list_del(&vport->list_entry);
		kfree(vport);
	}

	/*
	 * If bfa_linkup_delay is left at its default of -1, derive the
	 * value using bfad_get_linkup_delay(); otherwise use the value
	 * passed in as a module parameter.
	 */
	if (bfa_linkup_delay < 0) {
		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
		bfad_rport_online_wait(bfad);
		bfa_linkup_delay = -1;
	} else
		bfad_rport_online_wait(bfad);

	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");

	return BFA_STATUS_OK;
}

int
bfad_worker(void *ptr)
{
	struct bfad_s *bfad;
	unsigned long   flags;

	bfad = (struct bfad_s *)ptr;

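	/*
	 * The worker effectively runs once: it kicks the state machine
	 * with BFAD_E_INIT_SUCCESS, clears bfad_tsk under the lock so
	 * no one tries to stop an exited thread, and returns.
	 */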
	while (!kthread_should_stop()) {

		/* Send event BFAD_E_INIT_SUCCESS */
		bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		break;
	}

	return 0;
}

/*
 *  BFA driver interrupt functions
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s	*bfad = dev_id;
	struct list_head	doneq;
	unsigned long	flags;
	bfa_boolean_t rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
bfad_msix(int irq, void *dev_id)
{
	struct bfad_msix_s *vec = dev_id;
	struct bfad_s *bfad = vec->bfad;
	struct list_head doneq;
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_msix(&bfad->bfa, vec->msix.entry);
	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Initialize the MSIX entry table.
 */
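/*
 * Example: with the CT controllers' nine-vector map (cpe0-3, rme0-3,
 * ctrl), a mask with the low nine bits set populates entries 0..8 and
 * leaves bfad->nvec == 9.
 */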
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
			 int mask, int max_bit)
{
	int	i;
	int	match = 0x00000001;

	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
		if (mask & match) {
			bfad->msix_tab[bfad->nvec].msix.entry = i;
			bfad->msix_tab[bfad->nvec].bfad = bfad;
			msix_entries[bfad->nvec].entry = i;
			bfad->nvec++;
		}

		match <<= 1;
	}
}

int
bfad_install_msix_handler(struct bfad_s *bfad)
{
	int i, error = 0;

	for (i = 0; i < bfad->nvec; i++) {
		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
				bfad->pci_name,
				((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
				msix_name_ct[i] : msix_name_cb[i]));

		error = request_irq(bfad->msix_tab[i].msix.vector,
				    (irq_handler_t) bfad_msix, 0,
				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
		bfa_trc(bfad, i);
		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
		if (error) {
			int	j;

			for (j = 0; j < i; j++)
				free_irq(bfad->msix_tab[j].msix.vector,
						&bfad->msix_tab[j]);

			return 1;
		}
	}

	return 0;
}

/*
 * Setup MSIX based interrupt.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
	struct pci_dev *pdev = bfad->pcidev;

	/* Call BFA to get the msix map for this PCI function.  */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
	    (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * pci_enable_msix() failed, reporting how many
			 * vectors are actually available.  We have no
			 * mechanism to map multiple interrupt events onto
			 * a single vector, so retrying with fewer vectors
			 * would leave events with nowhere to fire; Linux
			 * does not duplicate vectors in the MSIX table for
			 * this case.  Fall back to a line-based interrupt.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Save the vectors */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

line_based:
	error = 0;
	if (request_irq
	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
	     BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enable interrupt handler failed */
		return 1;
	}

	return error;
}

void
bfad_remove_intr(struct bfad_s *bfad)
{
	int	i;

	if (bfad->bfad_flags & BFAD_MSIX_ON) {
		for (i = 0; i < bfad->nvec; i++)
			free_irq(bfad->msix_tab[i].msix.vector,
					&bfad->msix_tab[i]);

		pci_disable_msix(bfad->pcidev);
		bfad->bfad_flags &= ~BFAD_MSIX_ON;
	} else {
		free_irq(bfad->pcidev->irq, bfad);
	}
}

/*
 * PCI probe entry.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s	*bfad;
	int		error = -ENODEV, retval;

	/* For single port cards - only claim function 0 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
		(PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/* TRACE INIT */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	if (!(bfad_load_fwimg(pdev))) {
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	/* Initializing the state machine: State set to uninit */
	bfa_sm_set_state(bfad, bfad_sm_uninit);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;
	INIT_LIST_HEAD(&bfad->pbc_vport_list);

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;

	bfa_sm_send_event(bfad, BFAD_E_CREATE);

	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
		goto out_bfad_sm_failure;

	return 0;

out_bfad_sm_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_drv_init_failure:
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}

/*
 * PCI remove entry.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
	unsigned long	flags;

	bfa_trc(bfad, bfad->inst_no);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		kthread_stop(bfad->bfad_tsk);
	} else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Send Event BFAD_E_STOP */
	bfa_sm_send_event(bfad, BFAD_E_STOP);

	/* Driver detach and dealloc mem */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);

	/* Cleaning the BFAD instance */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}

struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};

/*
 * Driver module init.
 */
static int __init
bfad_init(void)
{
	int		error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
			BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_im_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_im_module_init failure\n");
		goto ext;
	}

	if (strcmp(FCPI_NAME, " fcpim") == 0)
		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

	bfa_auto_recover = ioc_auto_recover;
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);

	error = pci_register_driver(&bfad_pci_driver);
	if (error) {
		printk(KERN_WARNING "pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_im_module_exit();
	return error;
}

/*
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_im_module_exit();
	bfad_free_fwimg();
}

/* Firmware handling */
u32 *
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
		goto error;
	}

	*bfi_image = vmalloc(fw->size);
	if (NULL == *bfi_image) {
		printk(KERN_ALERT "Failed to allocate buffer for fw image "
			"size=%x!\n", (u32) fw->size);
		goto error;
	}

	memcpy(*bfi_image, fw->data, fw->size);
	*bfi_image_size = fw->size/sizeof(u32);

	return *bfi_image;

error:
	return NULL;
}

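/*
 * Return the firmware image for this adapter type, reading it through
 * the firmware loader on first use.  The images are cached in the
 * bfi_image_* globals and stay resident until bfad_free_fwimg() runs
 * at module exit.
 */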
u32 *
bfad_get_firmware_buf(struct pci_dev *pdev)
{
	if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
		if (bfi_image_ct_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_fc,
				&bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
		return bfi_image_ct_fc;
	} else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
		if (bfi_image_ct_cna_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_cna,
				&bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
		return bfi_image_ct_cna;
	} else {
		if (bfi_image_cb_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb_fc,
				&bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
		return bfi_image_cb_fc;
	}
}

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);