/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_ctreg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcpim,
	NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itnim_isr,		/* BFI_MC_ITNIM */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};


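/*
 * Attach the common port (bfa_port) sub-module and claim its DMA
 * memory, advancing the meminfo DMA cursors past the claimed region.
 */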
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16

/*
 * forward declarations for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
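/*
 * Resume request queue waiters: while the request queue has free
 * entries, pop elements off the wait queue and invoke their qresume()
 * callbacks.
 */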
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}

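/*
 * Legacy (INTx) interrupt handler: read the interrupt status, service
 * the RME (response) queue interrupts first, then the CPE (request)
 * queue interrupts, and finally any LPU mailbox/error interrupts.
 * Returns BFA_TRUE if any interrupt was pending.
 */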
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

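/*
 * Enable interrupts: install the MSI-X vectors and unmask the error
 * interrupts plus the CPE/RME queue and mailbox interrupts owned by
 * this PCI function (queues 0-3 on function 0, queues 4-7 otherwise).
 */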
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
	writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~intr_unmask;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

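/*
 * MSI-X handler for a request (CPE) queue: acknowledge the queue
 * interrupt and resume any requests waiting for free queue entries.
 */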
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

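/*
 * Catch-all handler for message classes without a registered ISR:
 * trace the offending message header and stop tracing.
 */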
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

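/*
 * MSI-X handler for a response (RME) queue: acknowledge the queue
 * interrupt, then walk the ring from the consumer index (ci) up to the
 * producer index (pi), dispatching each message to the handler
 * registered for its message class in bfa_isrs[].  CQ_INCR() is
 * defined elsewhere; it is assumed to advance a ring index modulo the
 * queue depth, roughly:
 *
 *	ci = (ci + 1) % num_rspq_elems;
 *
 * The shadow and hardware consumer indices are then updated and any
 * request queue waiters are resumed.
 */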
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_isrs[m->mhdr.msg_class](bfa, m);
			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

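/*
 * MSI-X handler for LPU mailbox and error interrupts: dispatch mailbox
 * messages to the IOC, then clear any halt/error conditions and
 * escalate them to the IOC error handler.
 */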
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_ioc_mbox_isr(&bfa->ioc);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/*
			 * If the LL_HALT bit is set, the FW Init Halt LL Port
			 * register must be cleared as well so that the
			 * Interrupt Status register can be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/*
			 * The ERR_PSS bit must be cleared as well: if
			 * interrupts are shared, the driver's interrupt
			 * handler may be called even after the bit has been
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}

static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	*dm_len +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
}

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

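/*
 * Initialize the IOC FC instance and bind the chip specific handler
 * set: the bfa_hwct_* routines when bfa_asic_id_ct() matches the
 * device id, the bfa_hwcb_* routines otherwise.
 */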
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

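/*
 * Carve up the driver-allocated DMA block: IOC attributes first, then
 * the request/response rings, the shadow CI/PI words, the config info
 * page and the config response page.  KVA for firmware trace debug
 * memory is claimed only when auto-recovery is enabled.
 */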
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->rme_process = BFA_TRUE;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

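/*
 * Completion callbacks queued towards the driver: report init status,
 * and complete pending stop/disable requests.
 */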
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	/* num_cqs needs no byte swapping; the u16 counts do */
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}
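
/*
 * Reset consumer and producer indices of all request/response queues.
 */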
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->rme_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}


/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Attach the IOC FC sub-module: register IOC callbacks, set up the PCI
 * and mailbox interfaces, and claim the memory carved out for IOC FC.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Start firmware initialization: record the INIT action and enable the
 * IOC; configuration continues from bfa_iocfc_enable_cbfn() once the
 * enable completes.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

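/*
 * Mailbox ISR for the IOCFC message class: dispatches firmware config
 * replies and update-queue responses.
 */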
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		iocfc->cfg_reply = &msg->cfg_reply;
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		WARN_ON(1);
	}
}

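/*
 * Return the current IOC FC attributes; the interrupt coalescing delay
 * and latency fall back to the firmware-reported values when not set
 * locally.
 */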
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

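/*
 * Update the interrupt coalescing attributes and, if the IOC is
 * operational, push them to the firmware with a SET_INTR request.
 */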
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}

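/*
 * Record the sense buffer length and DMA base address in the IOC FC
 * configuration info.
 */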
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}


bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

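/*
 * Copy out the pre-boot (PBC) virtual port configuration reported by
 * the firmware and return the number of pre-boot vports.
 */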
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}


/*
 * Use this function to query the memory requirement of the BFA library.
 * This function must be called before bfa_attach() to get the memory
 * required by the BFA layer for a given driver configuration.
 *
 * This call will fail, if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}


/*
 * Use this function to attach the driver instance to the BFA library.
 * This function will not trigger any HW initialization process (that
 * is done in the bfa_init() call).
 *
 * This call will fail, if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in]	pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 *	void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}
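
/*
 * A sketch of the attach sequence described in the comments above; the
 * allocation step is illustrative only and depends on the OS driver
 * (bfa, bfad and pcidev are assumed to be set up by the caller):
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo);
 *	// driver allocates each meminfo element (KVA and DMA) here
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *	bfa_init(bfa);	// triggers HW initialization
 *	bfa_start(bfa);
 */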

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 *	void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}

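/*
 * Completion queue helpers: bfa_comp_deq() splices pending completions
 * onto a caller-owned list, bfa_comp_process() invokes each queued
 * callback with BFA_TRUE (completed), and bfa_comp_free() invokes each
 * with BFA_FALSE (freed without completion).
 */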
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}


/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then overwrite any
 * entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 *	void
 *
 * Special Considerations:
 *	@note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

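/*
 * Populate cfg with the minimum supported configuration: defaults
 * first, then each firmware/driver resource count reduced to its
 * BFA_*_MIN floor, with min_cfg flagged.
 */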
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}