/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfa_sm.h"
#include "bfi.h"

/**
 * IB
 */
#define bna_ib_find_free_ibidx(_mask, _pos)\
do {\
	(_pos) = 0;\
	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
		((1 << (_pos)) & (_mask)))\
		(_pos)++;\
} while (0)

#define bna_ib_count_ibidx(_mask, _count)\
do {\
	int pos = 0;\
	(_count) = 0;\
	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
		if ((1 << pos) & (_mask))\
			(_count) = pos + 1;\
		pos++;\
	} \
} while (0)

#define bna_ib_select_segpool(_count, _q_idx)\
do {\
	int i;\
	(_q_idx) = -1;\
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
		if ((_count) <= ibidx_pool[i].pool_entry_size) {\
			(_q_idx) = i;\
			break;\
		} \
	} \
} while (0)
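
/*
 * Worked example of the helpers above: for _mask = 0x0b (bits 0, 1 and
 * 3 in use), bna_ib_find_free_ibidx() yields _pos = 2 (first clear
 * bit); bna_ib_count_ibidx() yields _count = 4, since it reports the
 * highest used bit position + 1 (the span of the mask), not the number
 * of set bits; bna_ib_select_segpool() then picks the first pool whose
 * pool_entry_size can hold that span, or -1 if none fits.
 */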
struct bna_ibidx_pool {
	int	pool_size;
	int	pool_entry_size;
};
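
/*
 * init_ibidx_pool() is presumably a macro from the bna headers that
 * statically defines and initializes the ibidx_pool[] table of
 * BFI_IBIDX_TOTAL_POOLS entries referenced by the helpers above.
 */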
init_ibidx_pool(ibidx_pool);

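/*
 * Look up an interrupt object by (type, vector) on the active queue and
 * bump its reference count; if absent, allocate one from the free queue.
 * Returns NULL only when the free queue is exhausted.
 */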
static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
		int vector)
{
	struct bna_intr *intr;
	struct list_head *qe;

	list_for_each(qe, &ib_mod->intr_active_q) {
		intr = (struct bna_intr *)qe;

		if ((intr->intr_type == intr_type) &&
			(intr->vector == vector)) {
			intr->ref_count++;
			return intr;
		}
	}

	if (list_empty(&ib_mod->intr_free_q))
		return NULL;

	bfa_q_deq(&ib_mod->intr_free_q, &intr);
	bfa_q_qe_init(&intr->qe);

	intr->ref_count = 1;
	intr->intr_type = intr_type;
	intr->vector = vector;

	list_add_tail(&intr->qe, &ib_mod->intr_active_q);

	return intr;
}

static void
bna_intr_put(struct bna_ib_mod *ib_mod,
		struct bna_intr *intr)
{
	intr->ref_count--;

	if (intr->ref_count == 0) {
		intr->ib = NULL;
		list_del(&intr->qe);
		bfa_q_qe_init(&intr->qe);
		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
	}
}

void
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	u8 offset;
	struct bna_doorbell_qset *qset;
	unsigned long off;

	ib_mod->bna = bna;

	ib_mod->ib = (struct bna_ib *)
		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->intr = (struct bna_intr *)
		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->idx_seg = (struct bna_ibidx_seg *)
		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ib_mod->ib_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_active_q);

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);

	for (i = 0; i < BFI_MAX_IB; i++) {
		ib_mod->ib[i].ib_id = i;

		ib_mod->ib[i].ib_seg_host_addr_kva =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		ib_mod->ib[i].ib_seg_host_addr.lsb =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		ib_mod->ib[i].ib_seg_host_addr.msb =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;

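		/*
		 * Compute the doorbell register offset for IB i without
		 * an ioremap'ed struct: index a NULL-based
		 * bna_doorbell_qset array (an offsetof-style idiom).
		 * Each qset holds two IBs, whose ib0 slots sit 0x20
		 * bytes apart.
		 */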
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
					* (0x20 >> 2)]);
		ib_mod->ib[i].door_bell.doorbell_addr = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);

		bfa_q_qe_init(&ib_mod->ib[i].qe);
		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);

		bfa_q_qe_init(&ib_mod->intr[i].qe);
		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
	}

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
			bfa_q_qe_init(&ib_mod->idx_seg[count]);
			ib_mod->idx_seg[count].ib_seg_size =
					ibidx_pool[i].pool_entry_size;
			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
			list_add_tail(&ib_mod->idx_seg[count].qe,
				&ib_mod->ibidx_seg_pool[i]);
			count++;
			offset += ibidx_pool[i].pool_entry_size;
		}
	}
}

void
bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
{
	int i;
	int j;
	struct list_head *qe;

	i = 0;
	list_for_each(qe, &ib_mod->ib_free_q)
		i++;

	i = 0;
	list_for_each(qe, &ib_mod->intr_free_q)
		i++;

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
			j++;
	}

	ib_mod->bna = NULL;
}

static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
		enum bna_intr_type intr_type,
		int vector)
{
	struct bna_ib *ib;
	struct bna_intr *intr;

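	/*
	 * For INTx the caller passes a line number; convert it to a
	 * one-hot bitmask here so it can be compared and OR'ed with the
	 * INTx masks used by bna_intx_enable()/bna_intx_disable().
	 */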
	if (intr_type == BNA_INTR_T_INTX)
		vector = (1 << vector);

	intr = bna_intr_get(ib_mod, intr_type, vector);
	if (intr == NULL)
		return NULL;

	if (intr->ib) {
		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
			bna_intr_put(ib_mod, intr);
			return NULL;
		}
		intr->ib->ref_count++;
		return intr->ib;
	}

	if (list_empty(&ib_mod->ib_free_q)) {
		bna_intr_put(ib_mod, intr);
		return NULL;
	}

	bfa_q_deq(&ib_mod->ib_free_q, &ib);
	bfa_q_qe_init(&ib->qe);

	ib->ref_count = 1;
	ib->start_count = 0;
	ib->idx_mask = 0;

	ib->intr = intr;
	ib->idx_seg = NULL;
	intr->ib = ib;

	ib->bna = ib_mod->bna;

	return ib;
}

static void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
	bna_intr_put(ib_mod, ib->intr);

	ib->ref_count--;

	if (ib->ref_count == 0) {
		ib->intr = NULL;
		ib->bna = NULL;
		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
	}
}

/* Returns index offset - starting from 0 */
static int
bna_ib_reserve_idx(struct bna_ib *ib)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int idx;
	int num_idx;
	int q_idx;

	/* Find the first free index position */
	bna_ib_find_free_ibidx(ib->idx_mask, idx);
	if (idx == BFI_IBIDX_MAX_SEGSIZE)
		return -1;

	/*
	 * Calculate the total number of indexes held by this IB,
	 * including the index newly reserved above.
	 */
	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);

	/* See if there is a free space in the index segment held by this IB */
	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
		ib->idx_mask |= (1 << idx);
		return idx;
	}

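	/*
	 * A larger segment is needed. Refuse while the IB is started:
	 * presumably the hardware is still using the currently
	 * programmed segment, so it can only be swapped while stopped.
	 */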
	if (ib->start_count)
		return -1;

	/* Allocate a new segment */
	bna_ib_select_segpool(num_idx, q_idx);
	while (1) {
		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
			return -1;
		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
			break;
		q_idx++;
	}
	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
	bfa_q_qe_init(&idx_seg->qe);

	/* Free the old segment */
	if (ib->idx_seg) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
	}

	ib->idx_seg = idx_seg;

	ib->idx_mask |= (1 << idx);

	return idx;
}

static void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int num_idx;
	int cur_q_idx;
	int new_q_idx;

	ib->idx_mask &= ~(1 << idx);

	if (ib->start_count)
		return;

	bna_ib_count_ibidx(ib->idx_mask, num_idx);

	/*
	 * Free the segment, if there are no more indexes in the segment
	 * held by this IB
	 */
	if (!num_idx) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = NULL;
		return;
	}

	/* See if we can move to a smaller segment */
	bna_ib_select_segpool(num_idx, new_q_idx);
	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
	while (new_q_idx < cur_q_idx) {
		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
			break;
		new_q_idx++;
	}
	if (new_q_idx < cur_q_idx) {
		/* Select the new smaller segment */
		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
		bfa_q_qe_init(&idx_seg->qe);
		/* Free the old segment */
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = idx_seg;
	}
}

static int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
	if (ib->start_count)
		return -1;

	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
	ib->ib_config.interpkt_count = ib_config->interpkt_count;
	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;

	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;

	return 0;
}

static void
bna_ib_start(struct bna_ib *ib)
{
	struct bna_ib_blk_mem ib_cfg;
	struct bna_ib_blk_mem *ib_mem;
	u32 pg_num;
	u32 intx_mask;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	ib->start_count++;

	if (ib->start_count > 1)
		return;

	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);

	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
			ib->ib_config.coalescing_timeo << 16) |
		((u32)ib->ib_config.ctrl_flags << 8) |
		(ib->intr->vector));
	ib_cfg.ipkt_n_ent_n_idxof =
		((u32)
		 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
		((u32)ib->idx_seg->ib_seg_size << 8) |
		(ib->idx_seg->ib_idx_tbl_offset);
	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
		ib->ib_config.interpkt_count << 24);

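	/*
	 * The adapter's register window is paged: select the HQM IB RAM
	 * page for this port first, then write the assembled config
	 * words at their offsets within that page.
	 */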
	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
				HQM_IB_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
					HQM_IB_RAM_BASE_OFFSET);

	ib_mem = (struct bna_ib_blk_mem *)0;
	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);

	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
				HQM_INDX_TBL_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
					HQM_INDX_TBL_RAM_BASE_OFFSET);
	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
		off = (unsigned long)
		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
		writel(0, base_addr + off);
	}

	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
		bna_intx_disable(ib->bna, intx_mask);
		intx_mask &= ~(ib->intr->vector);
		bna_intx_enable(ib->bna, intx_mask);
	}
}

static void
bna_ib_stop(struct bna_ib *ib)
{
	u32 intx_mask;

	ib->start_count--;

	if (ib->start_count == 0) {
		writel(BNA_DOORBELL_IB_INT_DISABLE,
			ib->door_bell.doorbell_addr);
		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
			bna_intx_disable(ib->bna, intx_mask);
			intx_mask |= (ib->intr->vector);
			bna_intx_enable(ib->bna, intx_mask);
		}
	}
}

static void
bna_ib_fail(struct bna_ib *ib)
{
	ib->start_count = 0;
}

/**
 * RXF
 */
static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
		enum bna_rxf_event);

static struct bfa_sm_table rxf_sm_table[] = {
	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
};
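
/*
 * rxf_sm_table maps each FSM handler function back to its state enum;
 * bna_rxf_state_get() walks it via bfa_sm_to_state() to report the
 * current state to callers.
 */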

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
		break;

	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_MOD:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_STARTED:
	case RXF_E_STOPPED:
	case RXF_E_CAM_FLTR_RESP:
		/**
		 * These events are received due to flushing of mbox
		 * when the device fails
		 */
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
{
	__rxf_config_set(rxf);
	__rxf_rit_set(rxf);
	rxf_enable(rxf);
}

static void
bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it cannot be waiting for a filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
		break;

	case RXF_E_FAIL:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_STARTED:
		/**
		 * Force rxf_process_filter() to go through initial
		 * config
		 */
		if ((rxf->ucast_active_mac != NULL) &&
			(rxf->ucast_pending_set == 0))
			rxf->ucast_pending_set = 1;

		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;

		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;

		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
{
	if (!rxf_process_packet_filter(rxf)) {
		/* No more pending CAM entries to update */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it cannot be waiting for a filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_process_packet_filter(rxf)) {
			/* No more pending CAM entries to update */
			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);

	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
		else
			bfa_fsm_send_event(rxf, RXF_E_RESUME);
	}
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		/* Hack to get FSM start clearing CAM entries */
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
		break;

	case RXF_E_RESUME:
		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * Note: Do not add rxf_clear_packet_filter here.
	 * It will overstep mbox when this transition happens:
	 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_clear_packet_filter(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
			rxf_disable(rxf);
		}
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * NOTE: Do not add rxf_disable here.
	 * It will overstep mbox when this transition happens:
	 * start_wait -> stop_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		/**
		 * This event is received due to abrupt transition from
		 * bna_rxf_sm_start_wait state on receiving
		 * RXF_E_STOP event
		 */
		rxf_disable(rxf);
		break;

	case RXF_E_STOPPED:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &=
		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
	__rxf_disable(rxf);
}

static void
bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STOPPED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
	__rxf_enable(rxf);
}

static void
bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
{
	__bna_rxf_stat_clr(rxf);
}

static void
bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_STAT_CLEARED:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
__rxf_enable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 1;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_enabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_disable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 0;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_disabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_config_set(struct bna_rxf *rxf)
{
	u32 i;
	struct bna_rss_mem *rss_mem;
	struct bna_rx_fndb_ram *rx_fndb_ram;
	struct bna *bna = rxf->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			RSS_TABLE_BASE_OFFSET);

	rss_mem = (struct bna_rss_mem *)0;

	/* Configure RSS if required */
	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
		/* configure RSS Table */
		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
			bna->port_num, RSS_TABLE_BASE_OFFSET),
			bna->regs.page_addr);

		/* temporarily disable RSS, while hash value is written */
		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(0, base_addr + off);

		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
			off = (unsigned long)
			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
				base_addr + off);
		}

		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
			base_addr + off);
	}

	/* Configure RxF */
	writel(BNA_GET_PAGE_NUM(
		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
		RX_FNDB_RAM_BASE_OFFSET),
		bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
		RX_FNDB_RAM_BASE_OFFSET);

	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;

	/* We always use RSS table 0 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
		base_addr + off);

	/* small large buffer enable/disable */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
		base_addr + off);

	/* RIT offset, HDS forced offset, multicast RxQ Id */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
	writel((rxf->rit_segment->rit_offset << 16) |
		(rxf->forced_offset << 8) |
		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
		base_addr + off);

	/*
	 * default vlan tag, default function enable, strip vlan bytes,
	 * HDS type, header size
	 */

	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
	writel(((u32)rxf->default_vlan_tag << 16) |
		(rxf->ctrl_flags &
			(BNA_RXF_CF_DEFAULT_VLAN |
			BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
			BNA_RXF_CF_VLAN_STRIP)) |
		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
		rxf->hds_cfg.header_size,
		base_addr + off);
}

void
__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
{
	struct bna *bna = rxf->rx->bna;
	int i;

	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
			bna->regs.page_addr);

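	/*
	 * Each 32-bit word of the VLAN RAM covers 32 VLAN IDs. When
	 * filtering is disabled, every word is written as 0xffffffff so
	 * the function accepts all VLAN tags.
	 */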
	if (status == BNA_STATUS_T_ENABLED) {
		/* enable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(rxf->vlan_filter_table[i],
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	} else {
		/* disable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(0xffffffff,
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	}
}

static void
__rxf_rit_set(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_rit_mem *rit_mem;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			FUNCTION_TO_RXQ_TRANSLATE);

	rit_mem = (struct bna_rit_mem *)0;

	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
			FUNCTION_TO_RXQ_TRANSLATE),
			bna->regs.page_addr);

	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
			rxf->rit_segment->rit[i].small_rxq_id,
			base_addr + off);
	}
}

static void
__bna_rxf_stat_clr(struct bna_rxf *rxf)
{
	struct bfi_ll_stats_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = 0;
	ll_req.txf_id_mask[0] = 0;
	ll_req.txf_id_mask[1] = 0;

	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			bna_rxf_cb_stats_cleared, rxf);
	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
rxf_enable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STARTED);
	else {
		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
		__rxf_enable(rxf);
	}
}

static void
rxf_cb_enabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STARTED);
}

static void
rxf_disable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
	else
		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
	__rxf_disable(rxf);
}

static void
rxf_cb_disabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
}

void
rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
}

static void
bna_rxf_cb_stats_cleared(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
}

void
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		const struct bna_mac *mac_addr)
{
	struct bfi_ll_mac_addr_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
			rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

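/*
 * Post at most one pending mcast CAM update per call. A return of 1
 * means a mailbox command is now in flight and the FSM must wait for
 * RXF_E_CAM_FLTR_RESP before processing the next entry; 0 means there
 * was nothing left to do.
 */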
static int
rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		return 1;
	}

	/* Delete multicast entries previously added */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	return 0;
}

static int
rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
{
	/* Apply the VLAN filter */
	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
	}

	/* Apply RSS configuration */
	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
			/* RSS is being disabled */
			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		} else {
			/* RSS is being enabled or reconfigured */
			rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		}
	}

	return 0;
}

/**
 * Processes pending ucast and mcast entry addition/deletion and issues the
 * mailbox command. Also processes pending filter configuration - promiscuous
 * mode, default mode, allmulti mode - and issues the mailbox command or
 * directly applies it to h/w
 */
static int
rxf_process_packet_filter(struct bna_rxf *rxf)
{
	/* Set the default MAC first */
	if (rxf->ucast_pending_set > 0) {
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
			rxf->ucast_active_mac);
		rxf->ucast_pending_set--;
		return 1;
	}

	if (rxf_process_packet_filter_ucast(rxf))
		return 1;

	if (rxf_process_packet_filter_mcast(rxf))
		return 1;

	if (rxf_process_packet_filter_promisc(rxf))
		return 1;

	if (rxf_process_packet_filter_allmulti(rxf))
		return 1;

	if (rxf_process_packet_filter_vlan(rxf))
		return 1;

	return 0;
}

static int
rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 3. delete pending mcast entries */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	/* 4. clear active mcast entries; move them to pending_add_q */
	if (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
		return 1;
	}

	return 0;
}

/**
 * In the rxf stop path, processes the pending ucast/mcast delete queue and
 * issues the mailbox command. Moves the active ucast/mcast entries to the
 * pending add queue, so that they are added to the CAM again in the rxf start
 * path. Moves the current filter settings - promiscuous, default, allmulti -
 * to the pending filter configuration
 */
static int
rxf_clear_packet_filter(struct bna_rxf *rxf)
{
	if (rxf_clear_packet_filter_ucast(rxf))
		return 1;

	if (rxf_clear_packet_filter_mcast(rxf))
		return 1;

	/* 5. clear active default MAC in the CAM */
	if (rxf->ucast_pending_set > 0)
		rxf->ucast_pending_set = 0;

	if (rxf_clear_packet_filter_promisc(rxf))
		return 1;

	if (rxf_clear_packet_filter_allmulti(rxf))
		return 1;

	return 0;
}

static void
rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 3. Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
	}

	/* 4. Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}
}

/**
 * In the rxf fail path, throws away the ucast/mcast entries pending for
 * deletion, moves all active ucast/mcast entries to the pending add queue so
 * that they are added back to the CAM in the rxf start path. Also moves the
 * current filter configuration to the pending filter configuration.
 */
static void
rxf_reset_packet_filter(struct bna_rxf *rxf)
{
	rxf_reset_packet_filter_ucast(rxf);

	rxf_reset_packet_filter_mcast(rxf);

	/* 5. Turn off ucast set flag */
	rxf->ucast_pending_set = 0;

	rxf_reset_packet_filter_promisc(rxf);

	rxf_reset_packet_filter_allmulti(rxf);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	struct list_head *qe;
	struct bna_rxp *rxp;

	/* rxf_id is initialized during rx_mod init */
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_active_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;

	rxf->rxf_oper_state = (q_config->paused) ?
		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;

	bna_rxf_adv_init(rxf, rx, q_config);

	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
			q_config->num_paths);

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (q_config->rxp_type == BNA_RXP_SINGLE)
			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
		else
			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
		break;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));

	/* Set up VLAN 0 for pure priority tagged packets */
	rxf->vlan_filter_table[0] |= 1;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_mac *mac;

	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
	rxf->rit_segment = NULL;

	rxf->ucast_pending_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_active_mac) {
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_active_mac);
		rxf->ucast_active_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Turn off pending promisc mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* system promisc state should be pending */
		BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}
	/* Promisc mode should not be active */
	BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);

	/* Turn off pending all-multi mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	}
	/* Allmulti mode should not be active */
	BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
				1 << (rx->rxf.rxf_id - 32));
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
	else
		rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
				1 << (rx->rxf.rxf_id - 32);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

int
bna_rxf_state_get(struct bna_rxf *rxf)
{
	return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_active_mac == NULL) {
		rxf->ucast_active_mac =
				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_active_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
	}

	memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set++;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->mcast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

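/*
 * Reconcile the new multicast list against the current state: allocate
 * CAM entries for genuinely new addresses, drop pending-add entries
 * that are no longer wanted, and schedule deletion of active entries
 * missing from the new list. Hardware is touched only if something
 * actually changed (need_hw_config).
 */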
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	struct bna_mac *mac1;
	int skip;
	int delete;
	int need_hw_config = 0;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Schedule for addition */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		skip = 0;

		/* Skip if already added */
		list_for_each(qe, &rxf->mcast_active_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
							mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		/* Skip if pending addition */
		list_for_each(qe, &rxf->mcast_pending_add_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
							mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		need_hw_config = 1;
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Delete the entries that are in the pending_add_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete)
			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		else
			list_add_tail(&mac->qe, &list_head);
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Schedule entries for deletion that are in the active_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete) {
			list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
			need_hw_config = 1;
		} else {
			list_add_tail(&mac->qe, &list_head);
		}
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

/**
 * RX
 */
#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
	struct bna_doorbell_qset *_qset; \
	unsigned long off; \
	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
	(q)->rcb->q_depth = (qdepth); \
	(q)->rcb->unmap_q = unmapq_mem; \
	(q)->rcb->rxq = (q); \
	(q)->rcb->cq = &(rxp)->cq; \
	(q)->rcb->bnad = (bna)->bnad; \
	_qset = (struct bna_doorbell_qset *)0; \
	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
	(q)->rcb->q_dbell = off + \
		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
	(q)->rcb->id = _id; \
} while (0)

#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
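
/*
 * SIZE_TO_PAGES() rounds a byte count up to whole pages: the second
 * term contributes one extra page whenever size has a non-zero
 * remainder below PAGE_SIZE. With 4 KiB pages, SIZE_TO_PAGES(8192) == 2
 * and SIZE_TO_PAGES(8193) == 3.
 */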

#define call_rx_stop_callback(rx, status) \
	if ((rx)->stop_cbfn) { \
		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
		(rx)->stop_cbfn = NULL; \
		(rx)->stop_cbarg = NULL; \
	}

/*
 * Since rx_enable is a synchronous callback, there is no start_cbfn required.
 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
 * for each rxpath.
 */

#define call_rx_disable_cbfn(rx, status) \
	if ((rx)->disable_cbfn) { \
		(*(rx)->disable_cbfn)((rx)->disable_cbarg, \
			status); \
		(rx)->disable_cbfn = NULL; \
		(rx)->disable_cbarg = NULL; \
	}

#define rxqs_reqd(type, num_rxqs) \
	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))

#define rx_ib_fail(rx) \
do { \
	struct bna_rxp *rxp; \
	struct list_head *qe; \
	list_for_each(qe, &(rx)->rxp_q) { \
		rxp = (struct bna_rxp *)qe; \
		bna_ib_fail(rxp->cq.ib); \
	} \
} while (0)
1847
1848 static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1849 static void __bna_rxq_start(struct bna_rxq *rxq);
1850 static void __bna_cq_start(struct bna_cq *cq);
1851 static void bna_rit_create(struct bna_rx *rx);
1852 static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1853 static void bna_rx_cb_rxq_stopped_all(void *arg);
1854
1855 bfa_fsm_state_decl(bna_rx, stopped,
1856 struct bna_rx, enum bna_rx_event);
1857 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1858 struct bna_rx, enum bna_rx_event);
1859 bfa_fsm_state_decl(bna_rx, started,
1860 struct bna_rx, enum bna_rx_event);
1861 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1862 struct bna_rx, enum bna_rx_event);
1863 bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1864 struct bna_rx, enum bna_rx_event);
1865
1866 static const struct bfa_sm_table rx_sm_table[] = {
1867 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1868 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1869 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1870 {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1871 {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1872 };
1873
bna_rx_sm_stopped_entry(struct bna_rx * rx)1874 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1875 {
1876 struct bna_rxp *rxp;
1877 struct list_head *qe_rxp;
1878
1879 list_for_each(qe_rxp, &rx->rxp_q) {
1880 rxp = (struct bna_rxp *)qe_rxp;
1881 rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1882 }
1883
1884 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1885 }
1886
bna_rx_sm_stopped(struct bna_rx * rx,enum bna_rx_event event)1887 static void bna_rx_sm_stopped(struct bna_rx *rx,
1888 enum bna_rx_event event)
1889 {
1890 switch (event) {
1891 case RX_E_START:
1892 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1893 break;
1894 case RX_E_STOP:
1895 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1896 break;
1897 case RX_E_FAIL:
1898 /* no-op */
1899 break;
1900 default:
1901 bfa_sm_fault(rx->bna, event);
1902 break;
1903 }
1904
1905 }
1906
bna_rx_sm_rxf_start_wait_entry(struct bna_rx * rx)1907 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1908 {
1909 struct bna_rxp *rxp;
1910 struct list_head *qe_rxp;
1911 struct bna_rxq *q0 = NULL, *q1 = NULL;
1912
1913 /* Setup the RIT */
1914 bna_rit_create(rx);
1915
1916 list_for_each(qe_rxp, &rx->rxp_q) {
1917 rxp = (struct bna_rxp *)qe_rxp;
1918 bna_ib_start(rxp->cq.ib);
1919 GET_RXQS(rxp, q0, q1);
1920 q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1921 __bna_rxq_start(q0);
1922 rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1923 if (q1) {
1924 __bna_rxq_start(q1);
1925 rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1926 }
1927 __bna_cq_start(&rxp->cq);
1928 }
1929
1930 bna_rxf_start(&rx->rxf);
1931 }
1932
1933 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1934 enum bna_rx_event event)
1935 {
1936 switch (event) {
1937 case RX_E_STOP:
1938 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1939 break;
1940 case RX_E_FAIL:
1941 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1942 rx_ib_fail(rx);
1943 bna_rxf_fail(&rx->rxf);
1944 break;
1945 case RX_E_RXF_STARTED:
1946 bfa_fsm_set_state(rx, bna_rx_sm_started);
1947 break;
1948 default:
1949 bfa_sm_fault(rx->bna, event);
1950 break;
1951 }
1952 }
1953
1954 static void
1955 bna_rx_sm_started_entry(struct bna_rx *rx)
1956 {
1957 struct bna_rxp *rxp;
1958 struct list_head *qe_rxp;
1959
1960 /* Start IB */
1961 list_for_each(qe_rxp, &rx->rxp_q) {
1962 rxp = (struct bna_rxp *)qe_rxp;
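/* Ack zero completions to arm the CQ doorbell before traffic starts */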
1963 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1964 }
1965
1966 bna_llport_rx_started(&rx->bna->port.llport);
1967 }
1968
1969 static void
1970 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1971 {
1972 switch (event) {
1973 case RX_E_FAIL:
1974 bna_llport_rx_stopped(&rx->bna->port.llport);
1975 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1976 rx_ib_fail(rx);
1977 bna_rxf_fail(&rx->rxf);
1978 break;
1979 case RX_E_STOP:
1980 bna_llport_rx_stopped(&rx->bna->port.llport);
1981 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1982 break;
1983 default:
1984 bfa_sm_fault(rx->bna, event);
1985 break;
1986 }
1987 }
1988
1989 static void
1990 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1991 {
1992 bna_rxf_stop(&rx->rxf);
1993 }
1994
1995 static void
1996 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1997 {
1998 switch (event) {
1999 case RX_E_RXF_STOPPED:
2000 bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
2001 break;
2002 case RX_E_RXF_STARTED:
2003 /**
2004 * RxF was in the process of starting up when
2005 * RXF_E_STOP was issued. Ignore this event
2006 */
2007 break;
2008 case RX_E_FAIL:
2009 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2010 rx_ib_fail(rx);
2011 bna_rxf_fail(&rx->rxf);
2012 break;
2013 default:
2014 bfa_sm_fault(rx->bna, event);
2015 break;
2016 }
2017
2018 }
2019
2020 static void
2021 bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2022 {
2023 struct bna_rxp *rxp = NULL;
2024 struct bna_rxq *q0 = NULL;
2025 struct bna_rxq *q1 = NULL;
2026 struct list_head *qe;
2027 u32 rxq_mask[2] = {0, 0};
2028
2029 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2030 bfa_wc_up(&rx->rxq_stop_wc);
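/* Collect every RXQ id into a 64-bit mask, split across two 32-bit words */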
2031 list_for_each(qe, &rx->rxp_q) {
2032 rxp = (struct bna_rxp *)qe;
2033 GET_RXQS(rxp, q0, q1);
2034 if (q0->rxq_id < 32)
2035 rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2036 else
2037 rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2038 if (q1) {
2039 if (q1->rxq_id < 32)
2040 rxq_mask[0] |= ((u32)1 << q1->rxq_id);
2041 else
2042 rxq_mask[1] |= ((u32)
2043 1 << (q1->rxq_id - 32));
2044 }
2045 }
2046
2047 __bna_multi_rxq_stop(rxp, rxq_mask);
2048 }
2049
2050 static void
2051 bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2052 {
2053 struct bna_rxp *rxp = NULL;
2054 struct list_head *qe;
2055
2056 switch (event) {
2057 case RX_E_RXQ_STOPPED:
2058 list_for_each(qe, &rx->rxp_q) {
2059 rxp = (struct bna_rxp *)qe;
2060 bna_ib_stop(rxp->cq.ib);
2061 }
2062 /* Fall through */
2063 case RX_E_FAIL:
2064 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2065 break;
2066 default:
2067 bfa_sm_fault(rx->bna, event);
2068 break;
2069 }
2070 }
2071
2072 static void
2073 __bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
2074 {
2075 struct bfi_ll_q_stop_req ll_req;
2076
2077 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
2078 ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
2079 ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
2080 bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
2081 bna_rx_cb_multi_rxq_stopped, rxp);
2082 bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
2083 }
2084
2085 static void
2086 __bna_rxq_start(struct bna_rxq *rxq)
2087 {
2088 struct bna_rxtx_q_mem *q_mem;
2089 struct bna_rxq_mem rxq_cfg, *rxq_mem;
2090 struct bna_dma_addr cur_q_addr;
2091 /* struct bna_doorbell_qset *qset; */
2092 struct bna_qpt *qpt;
2093 u32 pg_num;
2094 struct bna *bna = rxq->rx->bna;
2095 void __iomem *base_addr;
2096 unsigned long off;
2097
2098 qpt = &rxq->qpt;
2099 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2100
2101 rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2102 rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2103 rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2104 rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2105
2106 rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
2107 rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
2108 (qpt->page_size >> 2);
2109 rxq_cfg.sg_n_cq_n_cns_ptr =
2110 ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
2111 rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
2112 BNA_Q_IDLE_STATE;
2113 rxq_cfg.next_qid = 0x0 | (0x3 << 8);
2114
2115 /* Write the page number register */
2116 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2117 HQM_RXTX_Q_RAM_BASE_OFFSET);
2118 writel(pg_num, bna->regs.page_addr);
2119
2120 /* Write to h/w */
2121 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2122 HQM_RXTX_Q_RAM_BASE_OFFSET);
2123
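/*
 * NULL-based array indexing below computes register offsets
 * (offsetof-style); the pointer is never dereferenced.
 */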
2124 q_mem = (struct bna_rxtx_q_mem *)0;
2125 rxq_mem = &q_mem[rxq->rxq_id].rxq;
2126
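/*
 * The DMA address words are byte-swapped (htonl) because the H/W needs
 * to read them as little endian; see the matching comment in
 * __bna_txq_start().
 */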
2127 off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
2128 writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
2129
2130 off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
2131 writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
2132
2133 off = (unsigned long)&rxq_mem->cur_q_entry_lo;
2134 writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
2135
2136 off = (unsigned long)&rxq_mem->cur_q_entry_hi;
2137 writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
2138
2139 off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
2140 writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2141
2142 off = (unsigned long)&rxq_mem->entry_n_pg_size;
2143 writel(rxq_cfg.entry_n_pg_size, base_addr + off);
2144
2145 off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
2146 writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
2147
2148 off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
2149 writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
2150
2151 off = (unsigned long)&rxq_mem->next_qid;
2152 writel(rxq_cfg.next_qid, base_addr + off);
2153
2154 rxq->rcb->producer_index = 0;
2155 rxq->rcb->consumer_index = 0;
2156 }
2157
2158 static void
2159 __bna_cq_start(struct bna_cq *cq)
2160 {
2161 struct bna_cq_mem cq_cfg, *cq_mem;
2162 const struct bna_qpt *qpt;
2163 struct bna_dma_addr cur_q_addr;
2164 u32 pg_num;
2165 struct bna *bna = cq->rx->bna;
2166 void __iomem *base_addr;
2167 unsigned long off;
2168
2169 qpt = &cq->qpt;
2170 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2171
2172 /*
2173 * Fill out structure, to be subsequently written
2174 * to hardware
2175 */
2176 cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2177 cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2178 cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2179 cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2180
2181 cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
2182 cq_cfg.entry_n_pg_size =
2183 ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
2184 cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
2185 ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
2186 cq_cfg.q_state = BNA_Q_IDLE_STATE;
2187
2188 /* Write the page number register */
2189 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2190 HQM_CQ_RAM_BASE_OFFSET);
2191
2192 writel(pg_num, bna->regs.page_addr);
2193
2194 /* H/W write */
2195 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2196 HQM_CQ_RAM_BASE_OFFSET);
2197
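/*
 * As in __bna_rxq_start(): NULL-based indexing yields register offsets,
 * and the DMA address words are byte-swapped for the H/W.
 */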
2198 cq_mem = (struct bna_cq_mem *)0;
2199
2200 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
2201 writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
2202
2203 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
2204 writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
2205
2206 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
2207 writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
2208
2209 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
2210 writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
2211
2212 off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
2213 writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2214
2215 off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
2216 writel(cq_cfg.entry_n_pg_size, base_addr + off);
2217
2218 off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
2219 writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
2220
2221 off = (unsigned long)&cq_mem[cq->cq_id].q_state;
2222 writel(cq_cfg.q_state, base_addr + off);
2223
2224 cq->ccb->producer_index = 0;
2225 *(cq->ccb->hw_producer_index) = 0;
2226 }
2227
2228 static void
2229 bna_rit_create(struct bna_rx *rx)
2230 {
2231 struct list_head *qe_rxp;
2232 struct bna *bna;
2233 struct bna_rxp *rxp;
2234 struct bna_rxq *q0 = NULL;
2235 struct bna_rxq *q1 = NULL;
2236 int offset;
2237
2238 bna = rx->bna;
2239
2240 offset = 0;
2241 list_for_each(qe_rxp, &rx->rxp_q) {
2242 rxp = (struct bna_rxp *)qe_rxp;
2243 GET_RXQS(rxp, q0, q1);
2244 rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
2245 rx->rxf.rit_segment->rit[offset].small_rxq_id =
2246 (q1 ? q1->rxq_id : 0);
2247 offset++;
2248 }
2249 }
2250
2251 static int
2252 _rx_can_satisfy(struct bna_rx_mod *rx_mod,
2253 struct bna_rx_config *rx_cfg)
2254 {
2255 if ((rx_mod->rx_free_count == 0) ||
2256 (rx_mod->rxp_free_count == 0) ||
2257 (rx_mod->rxq_free_count == 0))
2258 return 0;
2259
2260 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
2261 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2262 (rx_mod->rxq_free_count < rx_cfg->num_paths))
2263 return 0;
2264 } else {
2265 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2266 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
2267 return 0;
2268 }
2269
2270 if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
2271 return 0;
2272
2273 return 1;
2274 }
2275
2276 static struct bna_rxq *
2277 _get_free_rxq(struct bna_rx_mod *rx_mod)
2278 {
2279 struct bna_rxq *rxq = NULL;
2280 struct list_head *qe = NULL;
2281
2282 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
2283 if (qe) {
2284 rx_mod->rxq_free_count--;
2285 rxq = (struct bna_rxq *)qe;
2286 }
2287 return rxq;
2288 }
2289
2290 static void
2291 _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2292 {
2293 bfa_q_qe_init(&rxq->qe);
2294 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
2295 rx_mod->rxq_free_count++;
2296 }
2297
2298 static struct bna_rxp *
2299 _get_free_rxp(struct bna_rx_mod *rx_mod)
2300 {
2301 struct list_head *qe = NULL;
2302 struct bna_rxp *rxp = NULL;
2303
2304 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
2305 if (qe) {
2306 rx_mod->rxp_free_count--;
2307
2308 rxp = (struct bna_rxp *)qe;
2309 }
2310
2311 return rxp;
2312 }
2313
2314 static void
2315 _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2316 {
2317 bfa_q_qe_init(&rxp->qe);
2318 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
2319 rx_mod->rxp_free_count++;
2320 }
2321
2322 static struct bna_rx *
2323 _get_free_rx(struct bna_rx_mod *rx_mod)
2324 {
2325 struct list_head *qe = NULL;
2326 struct bna_rx *rx = NULL;
2327
2328 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2329 if (qe) {
2330 rx_mod->rx_free_count--;
2331
2332 rx = (struct bna_rx *)qe;
2333 bfa_q_qe_init(qe);
2334 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2335 }
2336
2337 return rx;
2338 }
2339
2340 static void
2341 _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2342 {
2343 bfa_q_qe_init(&rx->qe);
2344 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2345 rx_mod->rx_free_count++;
2346 }
2347
2348 static void
2349 _rx_init(struct bna_rx *rx, struct bna *bna)
2350 {
2351 rx->bna = bna;
2352 rx->rx_flags = 0;
2353
2354 INIT_LIST_HEAD(&rx->rxp_q);
2355
2356 rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
2357 rx->rxq_stop_wc.wc_cbarg = rx;
2358 rx->rxq_stop_wc.wc_count = 0;
2359
2360 rx->stop_cbfn = NULL;
2361 rx->stop_cbarg = NULL;
2362 }
2363
2364 static void
2365 _rxp_add_rxqs(struct bna_rxp *rxp,
2366 struct bna_rxq *q0,
2367 struct bna_rxq *q1)
2368 {
2369 switch (rxp->type) {
2370 case BNA_RXP_SINGLE:
2371 rxp->rxq.single.only = q0;
2372 rxp->rxq.single.reserved = NULL;
2373 break;
2374 case BNA_RXP_SLR:
2375 rxp->rxq.slr.large = q0;
2376 rxp->rxq.slr.small = q1;
2377 break;
2378 case BNA_RXP_HDS:
2379 rxp->rxq.hds.data = q0;
2380 rxp->rxq.hds.hdr = q1;
2381 break;
2382 default:
2383 break;
2384 }
2385 }
2386
2387 static void
2388 _rxq_qpt_init(struct bna_rxq *rxq,
2389 struct bna_rxp *rxp,
2390 u32 page_count,
2391 u32 page_size,
2392 struct bna_mem_descr *qpt_mem,
2393 struct bna_mem_descr *swqpt_mem,
2394 struct bna_mem_descr *page_mem)
2395 {
2396 int i;
2397
2398 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2399 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2400 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2401 rxq->qpt.page_count = page_count;
2402 rxq->qpt.page_size = page_size;
2403
2404 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2405
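/* Fill the s/w QPT with page kvas and the h/w QPT with the matching DMA addresses */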
2406 for (i = 0; i < rxq->qpt.page_count; i++) {
2407 rxq->rcb->sw_qpt[i] = page_mem[i].kva;
2408 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2409 page_mem[i].dma.lsb;
2410 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2411 page_mem[i].dma.msb;
2412
2413 }
2414 }
2415
2416 static void
2417 _rxp_cqpt_setup(struct bna_rxp *rxp,
2418 u32 page_count,
2419 u32 page_size,
2420 struct bna_mem_descr *qpt_mem,
2421 struct bna_mem_descr *swqpt_mem,
2422 struct bna_mem_descr *page_mem)
2423 {
2424 int i;
2425
2426 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2427 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2428 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2429 rxp->cq.qpt.page_count = page_count;
2430 rxp->cq.qpt.page_size = page_size;
2431
2432 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2433
2434 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2435 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2436
2437 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2438 page_mem[i].dma.lsb;
2439 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2440 page_mem[i].dma.msb;
2441
2442 }
2443 }
2444
2445 static void
2446 _rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2447 {
2448 list_add_tail(&rxp->qe, &rx->rxp_q);
2449 }
2450
2451 static void
2452 _init_rxmod_queues(struct bna_rx_mod *rx_mod)
2453 {
2454 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2455 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2456 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2457 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2458
2459 rx_mod->rx_free_count = 0;
2460 rx_mod->rxq_free_count = 0;
2461 rx_mod->rxp_free_count = 0;
2462 }
2463
2464 static void
2465 _rx_ctor(struct bna_rx *rx, int id)
2466 {
2467 bfa_q_qe_init(&rx->qe);
2468 INIT_LIST_HEAD(&rx->rxp_q);
2469 rx->bna = NULL;
2470
2471 rx->rxf.rxf_id = id;
2472
2473 /* FIXME: mbox_qe ctor()?? */
2474 bfa_q_qe_init(&rx->mbox_qe.qe);
2475
2476 rx->stop_cbfn = NULL;
2477 rx->stop_cbarg = NULL;
2478 }
2479
2480 static void
2481 bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2482 {
2483 struct bna_rxp *rxp = (struct bna_rxp *)arg;
2484
2485 bfa_wc_down(&rxp->rx->rxq_stop_wc);
2486 }
2487
2488 static void
2489 bna_rx_cb_rxq_stopped_all(void *arg)
2490 {
2491 struct bna_rx *rx = (struct bna_rx *)arg;
2492
2493 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2494 }
2495
2496 static void
2497 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2498 enum bna_cb_status status)
2499 {
2500 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2501
2502 bfa_wc_down(&rx_mod->rx_stop_wc);
2503 }
2504
2505 static void
2506 bna_rx_mod_cb_rx_stopped_all(void *arg)
2507 {
2508 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2509
2510 if (rx_mod->stop_cbfn)
2511 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2512 rx_mod->stop_cbfn = NULL;
2513 }
2514
2515 static void
2516 bna_rx_start(struct bna_rx *rx)
2517 {
2518 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2519 if (rx->rx_flags & BNA_RX_F_ENABLE)
2520 bfa_fsm_send_event(rx, RX_E_START);
2521 }
2522
2523 static void
2524 bna_rx_stop(struct bna_rx *rx)
2525 {
2526 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2527 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2528 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2529 else {
2530 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2531 rx->stop_cbarg = &rx->bna->rx_mod;
2532 bfa_fsm_send_event(rx, RX_E_STOP);
2533 }
2534 }
2535
2536 static void
2537 bna_rx_fail(struct bna_rx *rx)
2538 {
2539 /* Indicate port is not enabled, and failed */
2540 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2541 rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2542 bfa_fsm_send_event(rx, RX_E_FAIL);
2543 }
2544
2545 void
2546 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2547 {
2548 struct bna_rx *rx;
2549 struct list_head *qe;
2550
2551 rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2552 if (type == BNA_RX_T_LOOPBACK)
2553 rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2554
2555 list_for_each(qe, &rx_mod->rx_active_q) {
2556 rx = (struct bna_rx *)qe;
2557 if (rx->type == type)
2558 bna_rx_start(rx);
2559 }
2560 }
2561
2562 void
2563 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2564 {
2565 struct bna_rx *rx;
2566 struct list_head *qe;
2567
2568 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2569 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2570
2571 rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
2572
2573 /**
2574 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
2575 * as we are going to call bna_rx_stop
2576 */
2577 list_for_each(qe, &rx_mod->rx_active_q) {
2578 rx = (struct bna_rx *)qe;
2579 if (rx->type == type)
2580 bfa_wc_up(&rx_mod->rx_stop_wc);
2581 }
2582
2583 if (rx_mod->rx_stop_wc.wc_count == 0) {
2584 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2585 rx_mod->stop_cbfn = NULL;
2586 return;
2587 }
2588
2589 list_for_each(qe, &rx_mod->rx_active_q) {
2590 rx = (struct bna_rx *)qe;
2591 if (rx->type == type)
2592 bna_rx_stop(rx);
2593 }
2594 }
2595
2596 void
2597 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2598 {
2599 struct bna_rx *rx;
2600 struct list_head *qe;
2601
2602 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2603 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2604
2605 list_for_each(qe, &rx_mod->rx_active_q) {
2606 rx = (struct bna_rx *)qe;
2607 bna_rx_fail(rx);
2608 }
2609 }
2610
2611 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2612 struct bna_res_info *res_info)
2613 {
2614 int index;
2615 struct bna_rx *rx_ptr;
2616 struct bna_rxp *rxp_ptr;
2617 struct bna_rxq *rxq_ptr;
2618
2619 rx_mod->bna = bna;
2620 rx_mod->flags = 0;
2621
2622 rx_mod->rx = (struct bna_rx *)
2623 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2624 rx_mod->rxp = (struct bna_rxp *)
2625 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2626 rx_mod->rxq = (struct bna_rxq *)
2627 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2628
2629 /* Initialize the queues */
2630 _init_rxmod_queues(rx_mod);
2631
2632 /* Build RX queues */
2633 for (index = 0; index < BFI_MAX_RXQ; index++) {
2634 rx_ptr = &rx_mod->rx[index];
2635 _rx_ctor(rx_ptr, index);
2636 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2637 rx_mod->rx_free_count++;
2638 }
2639
2640 /* build RX-path queue */
2641 for (index = 0; index < BFI_MAX_RXQ; index++) {
2642 rxp_ptr = &rx_mod->rxp[index];
2643 rxp_ptr->cq.cq_id = index;
2644 bfa_q_qe_init(&rxp_ptr->qe);
2645 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2646 rx_mod->rxp_free_count++;
2647 }
2648
2649 /* build RXQ queue */
2650 for (index = 0; index < BFI_MAX_RXQ; index++) {
2651 rxq_ptr = &rx_mod->rxq[index];
2652 rxq_ptr->rxq_id = index;
2653
2654 bfa_q_qe_init(&rxq_ptr->qe);
2655 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2656 rx_mod->rxq_free_count++;
2657 }
2658
2659 rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
2660 rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
2661 rx_mod->rx_stop_wc.wc_count = 0;
2662 }
2663
2664 void
2665 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2666 {
2667 struct list_head *qe;
2668 int i;
2669
2670 i = 0;
2671 list_for_each(qe, &rx_mod->rx_free_q)
2672 i++;
2673
2674 i = 0;
2675 list_for_each(qe, &rx_mod->rxp_free_q)
2676 i++;
2677
2678 i = 0;
2679 list_for_each(qe, &rx_mod->rxq_free_q)
2680 i++;
2681
2682 rx_mod->bna = NULL;
2683 }
2684
2685 int
2686 bna_rx_state_get(struct bna_rx *rx)
2687 {
2688 return bfa_sm_to_state(rx_sm_table, rx->fsm);
2689 }
2690
2691 void
2692 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2693 {
2694 u32 cq_size, hq_size, dq_size;
2695 u32 cpage_count, hpage_count, dpage_count;
2696 struct bna_mem_info *mem_info;
2697 u32 cq_depth;
2698 u32 hq_depth;
2699 u32 dq_depth;
2700
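/*
 * The CQ must absorb completions from the data queue plus, for
 * non-SINGLE rxp types, the header queue; hence cq_depth is their sum.
 */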
2701 dq_depth = q_cfg->q_depth;
2702 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2703 cq_depth = dq_depth + hq_depth;
2704
2705 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2706 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2707 cq_size = ALIGN(cq_size, PAGE_SIZE);
2708 cpage_count = SIZE_TO_PAGES(cq_size);
2709
2710 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2711 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2712 dq_size = ALIGN(dq_size, PAGE_SIZE);
2713 dpage_count = SIZE_TO_PAGES(dq_size);
2714
2715 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2716 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2717 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2718 hq_size = ALIGN(hq_size, PAGE_SIZE);
2719 hpage_count = SIZE_TO_PAGES(hq_size);
2720 } else {
2721 hpage_count = 0;
2722 }
2723
2724 /* CCB structures */
2725 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2726 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2727 mem_info->mem_type = BNA_MEM_T_KVA;
2728 mem_info->len = sizeof(struct bna_ccb);
2729 mem_info->num = q_cfg->num_paths;
2730
2731 /* RCB structures */
2732 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2733 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2734 mem_info->mem_type = BNA_MEM_T_KVA;
2735 mem_info->len = sizeof(struct bna_rcb);
2736 mem_info->num = BNA_GET_RXQS(q_cfg);
2737
2738 /* Completion QPT */
2739 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2740 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2741 mem_info->mem_type = BNA_MEM_T_DMA;
2742 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2743 mem_info->num = q_cfg->num_paths;
2744
2745 /* Completion s/w QPT */
2746 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2747 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2748 mem_info->mem_type = BNA_MEM_T_KVA;
2749 mem_info->len = cpage_count * sizeof(void *);
2750 mem_info->num = q_cfg->num_paths;
2751
2752 /* Completion QPT pages */
2753 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2754 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2755 mem_info->mem_type = BNA_MEM_T_DMA;
2756 mem_info->len = PAGE_SIZE;
2757 mem_info->num = cpage_count * q_cfg->num_paths;
2758
2759 /* Data QPTs */
2760 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2761 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2762 mem_info->mem_type = BNA_MEM_T_DMA;
2763 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2764 mem_info->num = q_cfg->num_paths;
2765
2766 /* Data s/w QPTs */
2767 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2768 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2769 mem_info->mem_type = BNA_MEM_T_KVA;
2770 mem_info->len = dpage_count * sizeof(void *);
2771 mem_info->num = q_cfg->num_paths;
2772
2773 /* Data QPT pages */
2774 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2775 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2776 mem_info->mem_type = BNA_MEM_T_DMA;
2777 mem_info->len = PAGE_SIZE;
2778 mem_info->num = dpage_count * q_cfg->num_paths;
2779
2780 /* Hdr QPTs */
2781 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2782 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2783 mem_info->mem_type = BNA_MEM_T_DMA;
2784 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2785 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2786
2787 /* Hdr s/w QPTs */
2788 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2789 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2790 mem_info->mem_type = BNA_MEM_T_KVA;
2791 mem_info->len = hpage_count * sizeof(void *);
2792 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2793
2794 /* Hdr QPT pages */
2795 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2796 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2797 mem_info->mem_type = BNA_MEM_T_DMA;
2798 mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2799 mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2800
2801 /* RX Interrupts */
2802 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2803 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2804 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2805 }
2806
2807 struct bna_rx *
2808 bna_rx_create(struct bna *bna, struct bnad *bnad,
2809 struct bna_rx_config *rx_cfg,
2810 struct bna_rx_event_cbfn *rx_cbfn,
2811 struct bna_res_info *res_info,
2812 void *priv)
2813 {
2814 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2815 struct bna_rx *rx;
2816 struct bna_rxp *rxp;
2817 struct bna_rxq *q0;
2818 struct bna_rxq *q1;
2819 struct bna_intr_info *intr_info;
2820 u32 page_count;
2821 struct bna_mem_descr *ccb_mem;
2822 struct bna_mem_descr *rcb_mem;
2823 struct bna_mem_descr *unmapq_mem;
2824 struct bna_mem_descr *cqpt_mem;
2825 struct bna_mem_descr *cswqpt_mem;
2826 struct bna_mem_descr *cpage_mem;
2827 struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
2828 struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
2829 struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
2830 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
2831 struct bna_mem_descr *hpage_mem; /* hdr page mem */
2832 struct bna_mem_descr *dpage_mem; /* data page mem */
2833 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
2834 int dpage_count, hpage_count, rcb_idx;
2835 struct bna_ib_config ibcfg;
2836 /* Fail if we don't have enough RXPs, RXQs */
2837 if (!_rx_can_satisfy(rx_mod, rx_cfg))
2838 return NULL;
2839
2840 /* Initialize resource pointers */
2841 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2842 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2843 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2844 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2845 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2846 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2847 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2848 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2849 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2850 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2851 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2852 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2853 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2854
2855 /* Compute q depth & page count */
2856 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2857 rx_cfg->num_paths;
2858
2859 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2860 rx_cfg->num_paths;
2861
2862 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2863 rx_cfg->num_paths;
2864 /* Get RX pointer */
2865 rx = _get_free_rx(rx_mod);
2866 _rx_init(rx, bna);
2867 rx->priv = priv;
2868 rx->type = rx_cfg->rx_type;
2869
2870 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2871 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2872 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2873 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2874 /* Following callbacks are mandatory */
2875 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2876 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2877
2878 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
2879 switch (rx->type) {
2880 case BNA_RX_T_REGULAR:
2881 if (!(rx->bna->rx_mod.flags &
2882 BNA_RX_MOD_F_PORT_LOOPBACK))
2883 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2884 break;
2885 case BNA_RX_T_LOOPBACK:
2886 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
2887 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2888 break;
2889 }
2890 }
2891
2892 for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
2893 rxp = _get_free_rxp(rx_mod);
2894 rxp->type = rx_cfg->rxp_type;
2895 rxp->rx = rx;
2896 rxp->cq.rx = rx;
2897
2898 /* Get required RXQs, and queue them to rx-path */
2899 q0 = _get_free_rxq(rx_mod);
2900 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2901 q1 = NULL;
2902 else
2903 q1 = _get_free_rxq(rx_mod);
2904
2905 /* Initialize IB */
2906 if (1 == intr_info->num) {
2907 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2908 intr_info->intr_type,
2909 intr_info->idl[0].vector);
2910 rxp->vector = intr_info->idl[0].vector;
2911 } else {
2912 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2913 intr_info->intr_type,
2914 intr_info->idl[i].vector);
2915
2916 /* Map the MSI-x vector used for this RXP */
2917 rxp->vector = intr_info->idl[i].vector;
2918 }
2919
2920 rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
2921
2922 ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2923 ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
2924 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2925 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2926
2927 ret = bna_ib_config(rxp->cq.ib, &ibcfg);
2928
2929 /* Link rxqs to rxp */
2930 _rxp_add_rxqs(rxp, q0, q1);
2931
2932 /* Link rxp to rx */
2933 _rx_add_rxp(rx, rxp);
2934
2935 q0->rx = rx;
2936 q0->rxp = rxp;
2937
2938 /* Initialize RCB for the large / data q */
2939 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2940 RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
2941 (void *)unmapq_mem[rcb_idx].kva);
2942 rcb_idx++;
2943 (q0)->rx_packets = (q0)->rx_bytes = 0;
2944 (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
2945
2946 /* Initialize RXQs */
2947 _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
2948 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2949 q0->rcb->page_idx = dpage_idx;
2950 q0->rcb->page_count = dpage_count;
2951 dpage_idx += dpage_count;
2952
2953 /* Call bnad to complete rcb setup */
2954 if (rx->rcb_setup_cbfn)
2955 rx->rcb_setup_cbfn(bnad, q0->rcb);
2956
2957 if (q1) {
2958 q1->rx = rx;
2959 q1->rxp = rxp;
2960
2961 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2962 RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
2963 (void *)unmapq_mem[rcb_idx].kva);
2964 rcb_idx++;
2965 (q1)->buffer_size = (rx_cfg)->small_buff_size;
2966 (q1)->rx_packets = (q1)->rx_bytes = 0;
2967 (q1)->rx_packets_with_error =
2968 (q1)->rxbuf_alloc_failed = 0;
2969
2970 _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
2971 &hqpt_mem[i], &hsqpt_mem[i],
2972 &hpage_mem[hpage_idx]);
2973 q1->rcb->page_idx = hpage_idx;
2974 q1->rcb->page_count = hpage_count;
2975 hpage_idx += hpage_count;
2976
2977 /* Call bnad to complete rcb setup */
2978 if (rx->rcb_setup_cbfn)
2979 rx->rcb_setup_cbfn(bnad, q1->rcb);
2980 }
2981 /* Setup RXP::CQ */
2982 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2983 _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2984 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
2985 rxp->cq.ccb->page_idx = cpage_idx;
2986 rxp->cq.ccb->page_count = page_count;
2987 cpage_idx += page_count;
2988
2989 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2990 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2991
2992 rxp->cq.ccb->producer_index = 0;
2993 rxp->cq.ccb->q_depth = rx_cfg->q_depth +
2994 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2995 0 : rx_cfg->q_depth);
2996 rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
2997 rxp->cq.ccb->rcb[0] = q0->rcb;
2998 if (q1)
2999 rxp->cq.ccb->rcb[1] = q1->rcb;
3000 rxp->cq.ccb->cq = &rxp->cq;
3001 rxp->cq.ccb->bnad = bna->bnad;
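/*
 * The h/w producer index lives in the IB's host segment, at the
 * slot reserved for this CQ (ib_seg_offset).
 */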
3002 rxp->cq.ccb->hw_producer_index =
3003 ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
3004 (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
3005 *(rxp->cq.ccb->hw_producer_index) = 0;
3006 rxp->cq.ccb->intr_type = intr_info->intr_type;
3007 rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
3008 intr_info->idl[0].vector :
3009 intr_info->idl[i].vector;
3010 rxp->cq.ccb->rx_coalescing_timeo =
3011 rxp->cq.ib->ib_config.coalescing_timeo;
3012 rxp->cq.ccb->id = i;
3013
3014 /* Call bnad to complete CCB setup */
3015 if (rx->ccb_setup_cbfn)
3016 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
3017
3018 } /* for each rx-path */
3019
3020 bna_rxf_init(&rx->rxf, rx, rx_cfg);
3021
3022 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
3023
3024 return rx;
3025 }
3026
3027 void
3028 bna_rx_destroy(struct bna_rx *rx)
3029 {
3030 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
3031 struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
3032 struct bna_rxq *q0 = NULL;
3033 struct bna_rxq *q1 = NULL;
3034 struct bna_rxp *rxp;
3035 struct list_head *qe;
3036
3037 bna_rxf_uninit(&rx->rxf);
3038
3039 while (!list_empty(&rx->rxp_q)) {
3040 bfa_q_deq(&rx->rxp_q, &rxp);
3041 GET_RXQS(rxp, q0, q1);
3042 /* Callback to bnad for destroying RCB */
3043 if (rx->rcb_destroy_cbfn)
3044 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
3045 q0->rcb = NULL;
3046 q0->rxp = NULL;
3047 q0->rx = NULL;
3048 _put_free_rxq(rx_mod, q0);
3049 if (q1) {
3050 /* Callback to bnad for destroying RCB */
3051 if (rx->rcb_destroy_cbfn)
3052 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
3053 q1->rcb = NULL;
3054 q1->rxp = NULL;
3055 q1->rx = NULL;
3056 _put_free_rxq(rx_mod, q1);
3057 }
3058 rxp->rxq.slr.large = NULL;
3059 rxp->rxq.slr.small = NULL;
3060 if (rxp->cq.ib) {
3061 if (rxp->cq.ib_seg_offset != 0xff)
3062 bna_ib_release_idx(rxp->cq.ib,
3063 rxp->cq.ib_seg_offset);
3064 bna_ib_put(ib_mod, rxp->cq.ib);
3065 rxp->cq.ib = NULL;
3066 }
3067 /* Callback to bnad for destroying CCB */
3068 if (rx->ccb_destroy_cbfn)
3069 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
3070 rxp->cq.ccb = NULL;
3071 rxp->rx = NULL;
3072 _put_free_rxp(rx_mod, rxp);
3073 }
3074
3075 list_for_each(qe, &rx_mod->rx_active_q) {
3076 if (qe == &rx->qe) {
3077 list_del(&rx->qe);
3078 bfa_q_qe_init(&rx->qe);
3079 break;
3080 }
3081 }
3082
3083 rx->bna = NULL;
3084 rx->priv = NULL;
3085 _put_free_rx(rx_mod, rx);
3086 }
3087
3088 void
3089 bna_rx_enable(struct bna_rx *rx)
3090 {
3091 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
3092 return;
3093
3094 rx->rx_flags |= BNA_RX_F_ENABLE;
3095 if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
3096 bfa_fsm_send_event(rx, RX_E_START);
3097 }
3098
3099 void
3100 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
3101 void (*cbfn)(void *, struct bna_rx *,
3102 enum bna_cb_status))
3103 {
3104 if (type == BNA_SOFT_CLEANUP) {
3105 /* h/w should not be accessed. Treat it as if we are stopped */
3106 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
3107 } else {
3108 rx->stop_cbfn = cbfn;
3109 rx->stop_cbarg = rx->bna->bnad;
3110
3111 rx->rx_flags &= ~BNA_RX_F_ENABLE;
3112
3113 bfa_fsm_send_event(rx, RX_E_STOP);
3114 }
3115 }
3116
3117 /**
3118 * TX
3119 */
3120 #define call_tx_stop_cbfn(tx, status)\
3121 do {\
3122 if ((tx)->stop_cbfn)\
3123 (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
3124 (tx)->stop_cbfn = NULL;\
3125 (tx)->stop_cbarg = NULL;\
3126 } while (0)
3127
3128 #define call_tx_prio_change_cbfn(tx, status)\
3129 do {\
3130 if ((tx)->prio_change_cbfn)\
3131 (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
3132 (tx)->prio_change_cbfn = NULL;\
3133 } while (0)
3134
3135 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
3136 enum bna_cb_status status);
3137 static void bna_tx_cb_txq_stopped(void *arg, int status);
3138 static void bna_tx_cb_stats_cleared(void *arg, int status);
3139 static void __bna_tx_stop(struct bna_tx *tx);
3140 static void __bna_tx_start(struct bna_tx *tx);
3141 static void __bna_txf_stat_clr(struct bna_tx *tx);
3142
3143 enum bna_tx_event {
3144 TX_E_START = 1,
3145 TX_E_STOP = 2,
3146 TX_E_FAIL = 3,
3147 TX_E_TXQ_STOPPED = 4,
3148 TX_E_PRIO_CHANGE = 5,
3149 TX_E_STAT_CLEARED = 6,
3150 };
3151
3152 enum bna_tx_state {
3153 BNA_TX_STOPPED = 1,
3154 BNA_TX_STARTED = 2,
3155 BNA_TX_TXQ_STOP_WAIT = 3,
3156 BNA_TX_PRIO_STOP_WAIT = 4,
3157 BNA_TX_STAT_CLR_WAIT = 5,
3158 };
3159
3160 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
3161 enum bna_tx_event);
3162 bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
3163 enum bna_tx_event);
3164 bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
3165 enum bna_tx_event);
3166 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3167 enum bna_tx_event);
3168 bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
3169 enum bna_tx_event);
3170
3171 static const struct bfa_sm_table tx_sm_table[] = {
3172 {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
3173 {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
3174 {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
3175 {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
3176 {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
3177 };
3178
3179 static void
3180 bna_tx_sm_stopped_entry(struct bna_tx *tx)
3181 {
3182 struct bna_txq *txq;
3183 struct list_head *qe;
3184
3185 list_for_each(qe, &tx->txq_q) {
3186 txq = (struct bna_txq *)qe;
3187 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3188 }
3189
3190 call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
3191 }
3192
3193 static void
3194 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3195 {
3196 switch (event) {
3197 case TX_E_START:
3198 bfa_fsm_set_state(tx, bna_tx_sm_started);
3199 break;
3200
3201 case TX_E_STOP:
3202 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3203 break;
3204
3205 case TX_E_FAIL:
3206 /* No-op */
3207 break;
3208
3209 case TX_E_PRIO_CHANGE:
3210 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3211 break;
3212
3213 case TX_E_TXQ_STOPPED:
3214 /**
3215 * This event arrives when the mailbox is flushed
3216 * after a device failure
3217 */
3218 /* No-op */
3219 break;
3220
3221 default:
3222 bfa_sm_fault(tx->bna, event);
3223 }
3224 }
3225
3226 static void
3227 bna_tx_sm_started_entry(struct bna_tx *tx)
3228 {
3229 struct bna_txq *txq;
3230 struct list_head *qe;
3231
3232 __bna_tx_start(tx);
3233
3234 /* Start IB */
3235 list_for_each(qe, &tx->txq_q) {
3236 txq = (struct bna_txq *)qe;
3237 bna_ib_ack(&txq->ib->door_bell, 0);
3238 }
3239 }
3240
3241 static void
3242 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3243 {
3244 struct bna_txq *txq;
3245 struct list_head *qe;
3246
3247 switch (event) {
3248 case TX_E_STOP:
3249 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3250 __bna_tx_stop(tx);
3251 break;
3252
3253 case TX_E_FAIL:
3254 list_for_each(qe, &tx->txq_q) {
3255 txq = (struct bna_txq *)qe;
3256 bna_ib_fail(txq->ib);
3257 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3258 }
3259 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3260 break;
3261
3262 case TX_E_PRIO_CHANGE:
3263 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3264 break;
3265
3266 default:
3267 bfa_sm_fault(tx->bna, event);
3268 }
3269 }
3270
3271 static void
3272 bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
3273 {
3274 }
3275
3276 static void
3277 bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3278 {
3279 struct bna_txq *txq;
3280 struct list_head *qe;
3281
3282 switch (event) {
3283 case TX_E_FAIL:
3284 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3285 break;
3286
3287 case TX_E_TXQ_STOPPED:
3288 list_for_each(qe, &tx->txq_q) {
3289 txq = (struct bna_txq *)qe;
3290 bna_ib_stop(txq->ib);
3291 }
3292 bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
3293 break;
3294
3295 case TX_E_PRIO_CHANGE:
3296 /* No-op */
3297 break;
3298
3299 default:
3300 bfa_sm_fault(tx->bna, event);
3301 }
3302 }
3303
3304 static void
3305 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3306 {
3307 __bna_tx_stop(tx);
3308 }
3309
3310 static void
3311 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3312 {
3313 struct bna_txq *txq;
3314 struct list_head *qe;
3315
3316 switch (event) {
3317 case TX_E_STOP:
3318 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3319 break;
3320
3321 case TX_E_FAIL:
3322 call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
3323 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3324 break;
3325
3326 case TX_E_TXQ_STOPPED:
3327 list_for_each(qe, &tx->txq_q) {
3328 txq = (struct bna_txq *)qe;
3329 bna_ib_stop(txq->ib);
3330 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3331 }
3332 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3333 bfa_fsm_set_state(tx, bna_tx_sm_started);
3334 break;
3335
3336 case TX_E_PRIO_CHANGE:
3337 /* No-op */
3338 break;
3339
3340 default:
3341 bfa_sm_fault(tx->bna, event);
3342 }
3343 }
3344
3345 static void
3346 bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
3347 {
3348 __bna_txf_stat_clr(tx);
3349 }
3350
3351 static void
3352 bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
3353 {
3354 switch (event) {
3355 case TX_E_FAIL:
3356 case TX_E_STAT_CLEARED:
3357 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3358 break;
3359
3360 default:
3361 bfa_sm_fault(tx->bna, event);
3362 }
3363 }
3364
3365 static void
3366 __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3367 {
3368 struct bna_rxtx_q_mem *q_mem;
3369 struct bna_txq_mem txq_cfg;
3370 struct bna_txq_mem *txq_mem;
3371 struct bna_dma_addr cur_q_addr;
3372 u32 pg_num;
3373 void __iomem *base_addr;
3374 unsigned long off;
3375
3376 /* Fill out structure, to be subsequently written to hardware */
3377 txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
3378 txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
3379 cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
3380 txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
3381 txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
3382
3383 txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
3384
3385 txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
3386 (txq->qpt.page_size >> 2);
3387 txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
3388 ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
3389
3390 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3391 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3392 (txq->priority & 0x7));
3393 txq_cfg.wvc_n_cquota_n_rquota =
3394 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3395 (BFI_TX_MAX_WRR_QUOTA & 0xfff));
3396
3397 /* Setup the page and write to H/W */
3398
3399 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
3400 HQM_RXTX_Q_RAM_BASE_OFFSET);
3401 writel(pg_num, tx->bna->regs.page_addr);
3402
3403 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3404 HQM_RXTX_Q_RAM_BASE_OFFSET);
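/* NULL-based indexing: &q_mem[txq_id].txq yields a register offset, not a dereference */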
3405 q_mem = (struct bna_rxtx_q_mem *)0;
3406 txq_mem = &q_mem[txq->txq_id].txq;
3407
3408 /*
3409 * The following four writes are a hack because the H/W needs to
3410 * read these DMA addresses as little endian
3411 */
3412
3413 off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
3414 writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
3415
3416 off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
3417 writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
3418
3419 off = (unsigned long)&txq_mem->cur_q_entry_lo;
3420 writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
3421
3422 off = (unsigned long)&txq_mem->cur_q_entry_hi;
3423 writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
3424
3425 off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
3426 writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
3427
3428 off = (unsigned long)&txq_mem->entry_n_pg_size;
3429 writel(txq_cfg.entry_n_pg_size, base_addr + off);
3430
3431 off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
3432 writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
3433
3434 off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
3435 writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
3436
3437 off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
3438 writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
3439
3440 off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
3441 writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
3442
3443 txq->tcb->producer_index = 0;
3444 txq->tcb->consumer_index = 0;
3445 *(txq->tcb->hw_consumer_index) = 0;
3446
3447 }
3448
3449 static void
3450 __bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
3451 {
3452 struct bfi_ll_q_stop_req ll_req;
3453 u32 bit_mask[2] = {0, 0};
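/* TxQ ids 0..31 map into word 0 of the stop mask, 32..63 into word 1 */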
3454 if (txq->txq_id < 32)
3455 bit_mask[0] = (u32)1 << txq->txq_id;
3456 else
3457 bit_mask[1] = (u32)1 << (txq->txq_id - 32);
3458
3459 memset(&ll_req, 0, sizeof(ll_req));
3460 ll_req.mh.msg_class = BFI_MC_LL;
3461 ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
3462 ll_req.mh.mtag.h2i.lpu_id = 0;
3463 ll_req.q_id_mask[0] = htonl(bit_mask[0]);
3464 ll_req.q_id_mask[1] = htonl(bit_mask[1]);
3465
3466 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3467 bna_tx_cb_txq_stopped, tx);
3468
3469 bna_mbox_send(tx->bna, &tx->mbox_qe);
3470 }
3471
3472 static void
3473 __bna_txf_start(struct bna_tx *tx)
3474 {
3475 struct bna_tx_fndb_ram *tx_fndb;
3476 struct bna_txf *txf = &tx->txf;
3477 void __iomem *base_addr;
3478 unsigned long off;
3479
3480 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3481 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
3482 tx->bna->regs.page_addr);
3483
3484 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3485 TX_FNDB_RAM_BASE_OFFSET);
3486
3487 tx_fndb = (struct bna_tx_fndb_ram *)0;
3488 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3489
3490 writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
3491 base_addr + off);
3492
3493 if (tx->txf.txf_id < 32)
3494 tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
3495 else
3496 tx->bna->tx_mod.txf_bmap[1] |= ((u32)
3497 1 << (tx->txf.txf_id - 32));
3498 }
3499
3500 static void
3501 __bna_txf_stop(struct bna_tx *tx)
3502 {
3503 struct bna_tx_fndb_ram *tx_fndb;
3504 u32 page_num;
3505 u32 ctl_flags;
3506 struct bna_txf *txf = &tx->txf;
3507 void __iomem *base_addr;
3508 unsigned long off;
3509
3510 /* retrieve the running txf_flags & turn off enable bit */
3511 page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3512 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
3513 writel(page_num, tx->bna->regs.page_addr);
3514
3515 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3516 TX_FNDB_RAM_BASE_OFFSET);
3517 tx_fndb = (struct bna_tx_fndb_ram *)0;
3518 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3519
3520 ctl_flags = readl(base_addr + off);
3521 ctl_flags &= ~BFI_TXF_CF_ENABLE;
3522
3523 writel(ctl_flags, base_addr + off);
3524
3525 if (tx->txf.txf_id < 32)
3526 tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
3527 else
3528 tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
3529 1 << (tx->txf.txf_id - 32));
3530 }
3531
3532 static void
3533 __bna_txf_stat_clr(struct bna_tx *tx)
3534 {
3535 struct bfi_ll_stats_req ll_req;
3536 u32 txf_bmap[2] = {0, 0};
3537 if (tx->txf.txf_id < 32)
3538 txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
3539 else
3540 txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
3541 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
3542 ll_req.stats_mask = 0;
3543 ll_req.rxf_id_mask[0] = 0;
3544 ll_req.rxf_id_mask[1] = 0;
3545 ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
3546 ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
3547
3548 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3549 bna_tx_cb_stats_cleared, tx);
3550 bna_mbox_send(tx->bna, &tx->mbox_qe);
3551 }
3552
3553 static void
3554 __bna_tx_start(struct bna_tx *tx)
3555 {
3556 struct bna_txq *txq;
3557 struct list_head *qe;
3558
3559 list_for_each(qe, &tx->txq_q) {
3560 txq = (struct bna_txq *)qe;
3561 bna_ib_start(txq->ib);
3562 __bna_txq_start(tx, txq);
3563 }
3564
3565 __bna_txf_start(tx);
3566
3567 list_for_each(qe, &tx->txq_q) {
3568 txq = (struct bna_txq *)qe;
3569 txq->tcb->priority = txq->priority;
3570 (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
3571 }
3572 }
3573
3574 static void
3575 __bna_tx_stop(struct bna_tx *tx)
3576 {
3577 struct bna_txq *txq;
3578 struct list_head *qe;
3579
3580 list_for_each(qe, &tx->txq_q) {
3581 txq = (struct bna_txq *)qe;
3582 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3583 }
3584
3585 __bna_txf_stop(tx);
3586
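/* Take one wc reference per TxQ; TX_E_TXQ_STOPPED fires only after every stop request completes */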
3587 list_for_each(qe, &tx->txq_q) {
3588 txq = (struct bna_txq *)qe;
3589 bfa_wc_up(&tx->txq_stop_wc);
3590 }
3591
3592 list_for_each(qe, &tx->txq_q) {
3593 txq = (struct bna_txq *)qe;
3594 __bna_txq_stop(tx, txq);
3595 }
3596 }
3597
3598 static void
3599 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3600 struct bna_mem_descr *qpt_mem,
3601 struct bna_mem_descr *swqpt_mem,
3602 struct bna_mem_descr *page_mem)
3603 {
3604 int i;
3605
3606 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3607 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3608 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3609 txq->qpt.page_count = page_count;
3610 txq->qpt.page_size = page_size;
3611
3612 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3613
3614 for (i = 0; i < page_count; i++) {
3615 txq->tcb->sw_qpt[i] = page_mem[i].kva;
3616
3617 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3618 page_mem[i].dma.lsb;
3619 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3620 page_mem[i].dma.msb;
3621
3622 }
3623 }
3624
3625 static void
3626 bna_tx_free(struct bna_tx *tx)
3627 {
3628 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3629 struct bna_txq *txq;
3630 struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
3631 struct list_head *qe;
3632
3633 while (!list_empty(&tx->txq_q)) {
3634 bfa_q_deq(&tx->txq_q, &txq);
3635 bfa_q_qe_init(&txq->qe);
3636 if (txq->ib) {
3637 if (txq->ib_seg_offset != -1)
3638 bna_ib_release_idx(txq->ib,
3639 txq->ib_seg_offset);
3640 bna_ib_put(ib_mod, txq->ib);
3641 txq->ib = NULL;
3642 }
3643 txq->tcb = NULL;
3644 txq->tx = NULL;
3645 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3646 }
3647
3648 list_for_each(qe, &tx_mod->tx_active_q) {
3649 if (qe == &tx->qe) {
3650 list_del(&tx->qe);
3651 bfa_q_qe_init(&tx->qe);
3652 break;
3653 }
3654 }
3655
3656 tx->bna = NULL;
3657 tx->priv = NULL;
3658 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3659 }
3660
3661 static void
3662 bna_tx_cb_txq_stopped(void *arg, int status)
3663 {
3664 struct bna_tx *tx = (struct bna_tx *)arg;
3665
3666 bfa_q_qe_init(&tx->mbox_qe.qe);
3667 bfa_wc_down(&tx->txq_stop_wc);
3668 }
3669
3670 static void
3671 bna_tx_cb_txq_stopped_all(void *arg)
3672 {
3673 struct bna_tx *tx = (struct bna_tx *)arg;
3674
3675 bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
3676 }
3677
3678 static void
3679 bna_tx_cb_stats_cleared(void *arg, int status)
3680 {
3681 struct bna_tx *tx = (struct bna_tx *)arg;
3682
3683 bfa_q_qe_init(&tx->mbox_qe.qe);
3684
3685 bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
3686 }
3687
3688 static void
3689 bna_tx_start(struct bna_tx *tx)
3690 {
3691 tx->flags |= BNA_TX_F_PORT_STARTED;
3692 if (tx->flags & BNA_TX_F_ENABLED)
3693 bfa_fsm_send_event(tx, TX_E_START);
3694 }
3695
3696 static void
3697 bna_tx_stop(struct bna_tx *tx)
3698 {
3699 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3700 tx->stop_cbarg = &tx->bna->tx_mod;
3701
3702 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3703 bfa_fsm_send_event(tx, TX_E_STOP);
3704 }
3705
3706 static void
3707 bna_tx_fail(struct bna_tx *tx)
3708 {
3709 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3710 bfa_fsm_send_event(tx, TX_E_FAIL);
3711 }
3712
3713 static void
3714 bna_tx_prio_changed(struct bna_tx *tx, int prio)
3715 {
3716 struct bna_txq *txq;
3717 struct list_head *qe;
3718
3719 list_for_each(qe, &tx->txq_q) {
3720 txq = (struct bna_txq *)qe;
3721 txq->priority = prio;
3722 }
3723
3724 bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
3725 }
3726
3727 static void
3728 bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
3729 {
3730 if (cee_link)
3731 tx->flags |= BNA_TX_F_PRIO_LOCK;
3732 else
3733 tx->flags &= ~BNA_TX_F_PRIO_LOCK;
3734 }
3735
3736 static void
3737 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
3738 enum bna_cb_status status)
3739 {
3740 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3741
3742 bfa_wc_down(&tx_mod->tx_stop_wc);
3743 }
3744
3745 static void
3746 bna_tx_mod_cb_tx_stopped_all(void *arg)
3747 {
3748 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3749
3750 if (tx_mod->stop_cbfn)
3751 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
3752 tx_mod->stop_cbfn = NULL;
3753 }
3754
3755 void
3756 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3757 {
3758 u32 q_size;
3759 u32 page_count;
3760 struct bna_mem_info *mem_info;
3761
3762 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3763 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3764 mem_info->mem_type = BNA_MEM_T_KVA;
3765 mem_info->len = sizeof(struct bna_tcb);
3766 mem_info->num = num_txq;
3767
3768 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3769 q_size = ALIGN(q_size, PAGE_SIZE);
3770 page_count = q_size >> PAGE_SHIFT;
3771
3772 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3773 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3774 mem_info->mem_type = BNA_MEM_T_DMA;
3775 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3776 mem_info->num = num_txq;
3777
3778 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3779 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3780 mem_info->mem_type = BNA_MEM_T_KVA;
3781 mem_info->len = page_count * sizeof(void *);
3782 mem_info->num = num_txq;
3783
3784 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3785 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3786 mem_info->mem_type = BNA_MEM_T_DMA;
3787 mem_info->len = PAGE_SIZE;
3788 mem_info->num = num_txq * page_count;
3789
3790 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3791 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3792 BNA_INTR_T_MSIX;
3793 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3794 }
3795
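/*
 * Build a Tx object from pre-allocated resources: take a Tx and its
 * TxQs off the free lists, bind an IB (and an index-segment slot) to
 * each TxQ, then initialize TCBs, doorbells and queue page tables.
 * On any allocation failure, everything acquired so far is returned
 * via bna_tx_free().
 */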
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	struct bna_ib_mod *ib_mod = &bna->ib_mod;
	struct bna_doorbell_qset *qset;
	struct bna_ib_config ib_config;
	int page_count;
	int page_size;
	int page_idx;
	int i;
	unsigned long off;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/**
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	bfa_q_deq(&tx_mod->tx_free_q, &tx);
	bfa_q_qe_init(&tx->qe);

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->ib = NULL;
		txq->ib_seg_offset = -1;
		txq->tx = tx;
	}

	/* IBs */
	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;

		if (intr_info->num == 1)
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
						intr_info->idl[0].vector);
		else
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
						intr_info->idl[i].vector);

		if (txq->ib == NULL)
			goto err_return;

		txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
		if (txq->ib_seg_offset == -1)
			goto err_return;

		i++;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);
	tx->bna = bna;
	tx->priv = priv;
	tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
	tx->txq_stop_wc.wc_cbarg = tx;
	tx->txq_stop_wc.wc_count = 0;

	tx->type = tx_cfg->tx_type;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_PORT_LOOPBACK))
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		}
	}
	if (tx->bna->tx_mod.cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;

	/* TxQ */

	i = 0;
	page_idx = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = tx_mod->priority;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */

		ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
		ib_config.interpkt_timeo = 0; /* Not used */
		ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
		ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
					BFI_IB_CF_INT_ENABLE |
					BFI_IB_CF_COALESCING_MODE);
		bna_ib_config(txq->ib, &ib_config);

		/* TCB */

		txq->tcb->producer_index = 0;
		txq->tcb->consumer_index = 0;
		txq->tcb->hw_consumer_index = (volatile u32 *)
			((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
			 (txq->ib_seg_offset * BFI_IBIDX_SIZE));
		*(txq->tcb->hw_consumer_index) = 0;
		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
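		/* Doorbell address: indexing a NULL qset pointer is an
		 * offsetof-style idiom that yields the byte offset of this
		 * TxQ's doorbell within the qset array; it is then added
		 * to the doorbell base derived from the PCI BAR mapping.
		 */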
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)&qset[txq->txq_id].txq[0];
		txq->tcb->q_dbell = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
		txq->tcb->i_dbell = &txq->ib->door_bell;
		txq->tcb->intr_type = intr_info->intr_type;
		txq->tcb->intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		i++;
	}

	/* TxF */

	tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
	tx->txf.vlan = 0;

	/* Mbox element */
	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}

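/* Undo bna_tx_create(): let bnad tear down each TCB, then free the Tx. */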
void
bna_tx_destroy(struct bna_tx *tx)
{
	/* Callback to bnad for destroying TCB */
	if (tx->tcb_destroy_cbfn) {
		struct bna_txq *txq;
		struct list_head *qe;

		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
		}
	}

	bna_tx_free(tx);
}

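/* Enable is accepted only in the stopped state; the Tx actually starts
 * once the port has been started as well.
 */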
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_PORT_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

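/* A soft cleanup completes immediately; a hard cleanup goes through the
 * FSM and reports completion via cbfn.
 */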
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

int
bna_tx_state_get(struct bna_tx *tx)
{
	return bfa_sm_to_state(tx_sm_table, tx->fsm);
}

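/*
 * Carve the Tx and TxQ arrays out of the pre-allocated KVA blocks and
 * seed the free lists; IDs are assigned by array index.
 */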
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < BFI_MAX_TXQ; i++) {
		tx_mod->tx[i].txf.txf_id = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);

		tx_mod->txq[i].txq_id = i;
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
	tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
	tx_mod->tx_stop_wc.wc_count = 0;
}

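/* The free-list walks below compute counts that are never used;
 * presumably a leftover sanity check. Uninit otherwise just detaches
 * the module from the bna instance.
 */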
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

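/* Port is up: start every active Tx whose type matches (regular vs.
 * loopback).
 */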
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

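/* Port is going down: stop every matching Tx and complete to the port
 * once all of them have stopped, or immediately if none match.
 */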
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	tx_mod->stop_cbfn = bna_port_cb_tx_stopped;

	/**
	 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
	 * as we are going to call bna_tx_stop
	 */
	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bfa_wc_up(&tx_mod->tx_stop_wc);
	}

	if (tx_mod->tx_stop_wc.wc_count == 0) {
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
		tx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_stop(tx);
	}
}

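/* IOC failure: fail all active Tx objects regardless of type. */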
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

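/* Fan a priority change out to all active Tx objects, but only when
 * the priority actually changed.
 */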
void
bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
{
	struct bna_tx *tx;
	struct list_head *qe;

	if (prio != tx_mod->priority) {
		tx_mod->priority = prio;

		list_for_each(qe, &tx_mod->tx_active_q) {
			tx = (struct bna_tx *)qe;
			bna_tx_prio_changed(tx, prio);
		}
	}
}

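/* Record the CEE link state and fan it out to all active Tx objects. */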
void
bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->cee_link = cee_link;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_cee_link_status(tx, cee_link);
	}
}
