1 /* bnx2x_sp.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2011-2012 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
17 *
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30
31 #define BNX2X_MAX_EMUL_MULTI 16
32
33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
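/* ETH_ALEN (6) aligned up to sizeof(u32) is 8, so MAC_LEADING_ZERO_CNT
 * evaluates to the 2 zero bytes that pad each MAC entry to a u32 boundary.
 */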
34
35 /**** Exe Queue interfaces ****/
36
37 /**
38 * bnx2x_exe_queue_init - init the Exe Queue object
39 *
40 * @o: pointer to the object
41 * @exe_len: length
42 * @owner: pointer to the owner
43 * @validate: validate function pointer
44 * @optimize: optimize function pointer
45 * @exec: execute function pointer
46 * @get: get function pointer
47 */
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 struct bnx2x_exe_queue_obj *o,
50 int exe_len,
51 union bnx2x_qable_obj *owner,
52 exe_q_validate validate,
53 exe_q_remove remove,
54 exe_q_optimize optimize,
55 exe_q_execute exec,
56 exe_q_get get)
57 {
58 memset(o, 0, sizeof(*o));
59
60 INIT_LIST_HEAD(&o->exe_queue);
61 INIT_LIST_HEAD(&o->pending_comp);
62
63 spin_lock_init(&o->lock);
64
65 o->exe_chunk_len = exe_len;
66 o->owner = owner;
67
68 /* Owner specific callbacks */
69 o->validate = validate;
70 o->remove = remove;
71 o->optimize = optimize;
72 o->execute = exec;
73 o->get = get;
74
75 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76 exe_len);
77 }
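
/*
 * Usage sketch (hypothetical wiring, modelled on the VLAN-MAC objects below;
 * the chunk length differs per chip family):
 *
 *	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj,
 *			     bnx2x_validate_vlan_mac,
 *			     bnx2x_remove_vlan_mac,
 *			     bnx2x_optimize_vlan_mac,
 *			     bnx2x_execute_vlan_mac,
 *			     bnx2x_exeq_get_mac);
 */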
78
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80 struct bnx2x_exeq_elem *elem)
81 {
82 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
83 kfree(elem);
84 }
85
86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87 {
88 struct bnx2x_exeq_elem *elem;
89 int cnt = 0;
90
91 spin_lock_bh(&o->lock);
92
93 list_for_each_entry(elem, &o->exe_queue, link)
94 cnt++;
95
96 spin_unlock_bh(&o->lock);
97
98 return cnt;
99 }
100
101 /**
102 * bnx2x_exe_queue_add - add a new element to the execution queue
103 *
104 * @bp: driver handle
105 * @o: queue
106 * @cmd: new command to add
107 * @restore: true - do not optimize the command
108 *
109 * If the element is optimized or is illegal, frees it.
110 */
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112 struct bnx2x_exe_queue_obj *o,
113 struct bnx2x_exeq_elem *elem,
114 bool restore)
115 {
116 int rc;
117
118 spin_lock_bh(&o->lock);
119
120 if (!restore) {
121 /* Try to cancel this element from the queue */
122 rc = o->optimize(bp, o->owner, elem);
123 if (rc)
124 goto free_and_exit;
125
126 /* Check if this request is ok */
127 rc = o->validate(bp, o->owner, elem);
128 if (rc) {
129 BNX2X_ERR("Preamble failed: %d\n", rc);
130 goto free_and_exit;
131 }
132 }
133
134 /* If reached here, the element is legal - add it to the execution queue */
135 list_add_tail(&elem->link, &o->exe_queue);
136
137 spin_unlock_bh(&o->lock);
138
139 return 0;
140
141 free_and_exit:
142 bnx2x_exe_queue_free_elem(bp, elem);
143
144 spin_unlock_bh(&o->lock);
145
146 return rc;
147
148 }
149
150 static inline void __bnx2x_exe_queue_reset_pending(
151 struct bnx2x *bp,
152 struct bnx2x_exe_queue_obj *o)
153 {
154 struct bnx2x_exeq_elem *elem;
155
156 while (!list_empty(&o->pending_comp)) {
157 elem = list_first_entry(&o->pending_comp,
158 struct bnx2x_exeq_elem, link);
159
160 list_del(&elem->link);
161 bnx2x_exe_queue_free_elem(bp, elem);
162 }
163 }
164
165 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166 struct bnx2x_exe_queue_obj *o)
167 {
168
169 spin_lock_bh(&o->lock);
170
171 __bnx2x_exe_queue_reset_pending(bp, o);
172
173 spin_unlock_bh(&o->lock);
174
175 }
176
177 /**
178 * bnx2x_exe_queue_step - execute one execution chunk atomically
179 *
180 * @bp: driver handle
181 * @o: queue
182 * @ramrod_flags: flags
183 *
184 * (Atomicity is ensured using the exe_queue->lock).
185 */
186 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
187 struct bnx2x_exe_queue_obj *o,
188 unsigned long *ramrod_flags)
189 {
190 struct bnx2x_exeq_elem *elem, spacer;
191 int cur_len = 0, rc;
192
193 memset(&spacer, 0, sizeof(spacer));
194
195 spin_lock_bh(&o->lock);
196
197 /*
198 * Next step should not be performed until the current is finished,
199 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
200 * properly clear object internals without sending any command to the FW
201 * which also implies there won't be any completion to clear the
202 * 'pending' list.
203 */
204 if (!list_empty(&o->pending_comp)) {
205 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
206 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
207 __bnx2x_exe_queue_reset_pending(bp, o);
208 } else {
209 spin_unlock_bh(&o->lock);
210 return 1;
211 }
212 }
213
214 /*
215 * Run through the pending commands list and create a next
216 * execution chunk.
217 */
218 while (!list_empty(&o->exe_queue)) {
219 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
220 link);
221 WARN_ON(!elem->cmd_len);
222
223 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
224 cur_len += elem->cmd_len;
225 /*
226 * Prevent from both lists being empty when moving an
227 * element. This will allow the call of
228 * bnx2x_exe_queue_empty() without locking.
229 */
230 list_add_tail(&spacer.link, &o->pending_comp);
231 mb();
232 list_del(&elem->link);
233 list_add_tail(&elem->link, &o->pending_comp);
234 list_del(&spacer.link);
235 } else
236 break;
237 }
238
239 /* Sanity check */
240 if (!cur_len) {
241 spin_unlock_bh(&o->lock);
242 return 0;
243 }
244
245 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
246 if (rc < 0)
247 /*
248 * In case of an error return the commands back to the queue
249 * and reset the pending_comp.
250 */
251 list_splice_init(&o->pending_comp, &o->exe_queue);
252 else if (!rc)
253 /*
254 * If zero is returned, means there are no outstanding pending
255 * completions and we may dismiss the pending list.
256 */
257 __bnx2x_exe_queue_reset_pending(bp, o);
258
259 spin_unlock_bh(&o->lock);
260 return rc;
261 }
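
/*
 * Return convention of bnx2x_exe_queue_step(): a negative value means the
 * execute() callback failed and the chunk was returned to the queue, zero
 * means there is nothing more to do for now, and a positive value means
 * completions are still outstanding.
 */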
262
263 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
264 {
265 bool empty = list_empty(&o->exe_queue);
266
267 /* Don't reorder!!! */
268 mb();
269
270 return empty && list_empty(&o->pending_comp);
271 }
272
273 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
274 struct bnx2x *bp)
275 {
276 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
277 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
278 }
279
280 /************************ raw_obj functions ***********************************/
281 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
282 {
283 return !!test_bit(o->state, o->pstate);
284 }
285
286 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
287 {
288 smp_mb__before_clear_bit();
289 clear_bit(o->state, o->pstate);
290 smp_mb__after_clear_bit();
291 }
292
293 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
294 {
295 smp_mb__before_clear_bit();
296 set_bit(o->state, o->pstate);
297 smp_mb__after_clear_bit();
298 }
299
300 /**
301 * bnx2x_state_wait - wait until the given bit(state) is cleared
302 *
303 * @bp: device handle
304 * @state: state which is to be cleared
305 * @pstate: state buffer
306 *
307 */
308 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
309 unsigned long *pstate)
310 {
311 /* can take a while if any port is running */
312 int cnt = 5000;
313
314
315 if (CHIP_REV_IS_EMUL(bp))
316 cnt *= 20;
317
318 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
319
320 might_sleep();
321 while (cnt--) {
322 if (!test_bit(state, pstate)) {
323 #ifdef BNX2X_STOP_ON_ERROR
324 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
325 #endif
326 return 0;
327 }
328
329 usleep_range(1000, 1000);
330
331 if (bp->panic)
332 return -EIO;
333 }
334
335 /* timeout! */
336 BNX2X_ERR("timeout waiting for state %d\n", state);
337 #ifdef BNX2X_STOP_ON_ERROR
338 bnx2x_panic();
339 #endif
340
341 return -EBUSY;
342 }
343
344 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
345 {
346 return bnx2x_state_wait(bp, raw->state, raw->pstate);
347 }
348
349 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
350 /* credit handling callbacks */
351 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
352 {
353 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
354
355 WARN_ON(!mp);
356
357 return mp->get_entry(mp, offset);
358 }
359
360 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
361 {
362 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
363
364 WARN_ON(!mp);
365
366 return mp->get(mp, 1);
367 }
368
369 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
370 {
371 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
372
373 WARN_ON(!vp);
374
375 return vp->get_entry(vp, offset);
376 }
377
378 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
379 {
380 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
381
382 WARN_ON(!vp);
383
384 return vp->get(vp, 1);
385 }
386
387 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
388 {
389 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
390 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
391
392 if (!mp->get(mp, 1))
393 return false;
394
395 if (!vp->get(vp, 1)) {
396 mp->put(mp, 1);
397 return false;
398 }
399
400 return true;
401 }
402
403 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
404 {
405 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
406
407 return mp->put_entry(mp, offset);
408 }
409
410 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
411 {
412 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
413
414 return mp->put(mp, 1);
415 }
416
417 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
418 {
419 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
420
421 return vp->put_entry(vp, offset);
422 }
423
424 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
425 {
426 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
427
428 return vp->put(vp, 1);
429 }
430
431 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
432 {
433 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
434 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
435
436 if (!mp->put(mp, 1))
437 return false;
438
439 if (!vp->put(vp, 1)) {
440 mp->get(mp, 1);
441 return false;
442 }
443
444 return true;
445 }
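
/*
 * Note the rollback pattern in the combined VLAN-MAC credit handlers above:
 * if the VLAN pool operation fails after the MAC pool one succeeded, the MAC
 * credit is settled back (mp->put() after a failed vp->get(), mp->get()
 * after a failed vp->put()), so a pair is never left half-accounted.
 */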
446
447 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
448 int n, u8 *buf)
449 {
450 struct bnx2x_vlan_mac_registry_elem *pos;
451 u8 *next = buf;
452 int counter = 0;
453
454 /* traverse list */
455 list_for_each_entry(pos, &o->head, link) {
456 if (counter < n) {
457 /* place leading zeroes in buffer */
458 memset(next, 0, MAC_LEADING_ZERO_CNT);
459
460 /* place mac after leading zeroes*/
461 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
462 ETH_ALEN);
463
464 /* calculate address of next element and
465 * advance counter
466 */
467 counter++;
468 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
469
470 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
471 counter, next, pos->u.mac.mac);
472 }
473 }
474 return counter * ETH_ALEN;
475 }
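
/*
 * Buffer layout produced by bnx2x_get_n_elements(): each entry occupies
 * ALIGN(ETH_ALEN, sizeof(u32)) == 8 bytes - 2 leading zero bytes followed by
 * the 6-byte MAC - while the return value counts only the MAC bytes copied
 * (counter * ETH_ALEN).
 */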
476
477 /* check_add() callbacks */
478 static int bnx2x_check_mac_add(struct bnx2x *bp,
479 struct bnx2x_vlan_mac_obj *o,
480 union bnx2x_classification_ramrod_data *data)
481 {
482 struct bnx2x_vlan_mac_registry_elem *pos;
483
484 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
485
486 if (!is_valid_ether_addr(data->mac.mac))
487 return -EINVAL;
488
489 /* Check if a requested MAC already exists */
490 list_for_each_entry(pos, &o->head, link)
491 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
492 return -EEXIST;
493
494 return 0;
495 }
496
497 static int bnx2x_check_vlan_add(struct bnx2x *bp,
498 struct bnx2x_vlan_mac_obj *o,
499 union bnx2x_classification_ramrod_data *data)
500 {
501 struct bnx2x_vlan_mac_registry_elem *pos;
502
503 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
504
505 list_for_each_entry(pos, &o->head, link)
506 if (data->vlan.vlan == pos->u.vlan.vlan)
507 return -EEXIST;
508
509 return 0;
510 }
511
512 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
513 struct bnx2x_vlan_mac_obj *o,
514 union bnx2x_classification_ramrod_data *data)
515 {
516 struct bnx2x_vlan_mac_registry_elem *pos;
517
518 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
519 data->vlan_mac.mac, data->vlan_mac.vlan);
520
521 list_for_each_entry(pos, &o->head, link)
522 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
523 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
524 ETH_ALEN)))
525 return -EEXIST;
526
527 return 0;
528 }
529
530
531 /* check_del() callbacks */
532 static struct bnx2x_vlan_mac_registry_elem *
533 bnx2x_check_mac_del(struct bnx2x *bp,
534 struct bnx2x_vlan_mac_obj *o,
535 union bnx2x_classification_ramrod_data *data)
536 {
537 struct bnx2x_vlan_mac_registry_elem *pos;
538
539 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
540
541 list_for_each_entry(pos, &o->head, link)
542 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
543 return pos;
544
545 return NULL;
546 }
547
548 static struct bnx2x_vlan_mac_registry_elem *
549 bnx2x_check_vlan_del(struct bnx2x *bp,
550 struct bnx2x_vlan_mac_obj *o,
551 union bnx2x_classification_ramrod_data *data)
552 {
553 struct bnx2x_vlan_mac_registry_elem *pos;
554
555 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
556
557 list_for_each_entry(pos, &o->head, link)
558 if (data->vlan.vlan == pos->u.vlan.vlan)
559 return pos;
560
561 return NULL;
562 }
563
564 static struct bnx2x_vlan_mac_registry_elem *
565 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
566 struct bnx2x_vlan_mac_obj *o,
567 union bnx2x_classification_ramrod_data *data)
568 {
569 struct bnx2x_vlan_mac_registry_elem *pos;
570
571 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
572 data->vlan_mac.mac, data->vlan_mac.vlan);
573
574 list_for_each_entry(pos, &o->head, link)
575 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
576 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
577 ETH_ALEN)))
578 return pos;
579
580 return NULL;
581 }
582
583 /* check_move() callback */
584 static bool bnx2x_check_move(struct bnx2x *bp,
585 struct bnx2x_vlan_mac_obj *src_o,
586 struct bnx2x_vlan_mac_obj *dst_o,
587 union bnx2x_classification_ramrod_data *data)
588 {
589 struct bnx2x_vlan_mac_registry_elem *pos;
590 int rc;
591
592 /* Check if we can delete the requested configuration from the first
593 * object.
594 */
595 pos = src_o->check_del(bp, src_o, data);
596
597 /* check if configuration can be added */
598 rc = dst_o->check_add(bp, dst_o, data);
599
600 /* If this classification can not be added (is already set)
601 * or can't be deleted - return an error.
602 */
603 if (rc || !pos)
604 return false;
605
606 return true;
607 }
608
609 static bool bnx2x_check_move_always_err(
610 struct bnx2x *bp,
611 struct bnx2x_vlan_mac_obj *src_o,
612 struct bnx2x_vlan_mac_obj *dst_o,
613 union bnx2x_classification_ramrod_data *data)
614 {
615 return false;
616 }
617
618
619 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
620 {
621 struct bnx2x_raw_obj *raw = &o->raw;
622 u8 rx_tx_flag = 0;
623
624 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
625 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
626 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
627
628 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
629 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
630 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
631
632 return rx_tx_flag;
633 }
634
635
636 static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
637 bool add, unsigned char *dev_addr, int index)
638 {
639 u32 wb_data[2];
640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
641 NIG_REG_LLH0_FUNC_MEM;
642
643 if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
644 return;
645
646 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
647 (add ? "ADD" : "DELETE"), index);
648
649 if (add) {
650 /* LLH_FUNC_MEM is a u64 WB register */
651 reg_offset += 8*index;
652
653 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
654 (dev_addr[4] << 8) | dev_addr[5]);
655 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
656
657 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
658 }
659
660 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
661 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
662 }
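
/*
 * Packing example for the wide-bus write above: for the MAC
 * aa:bb:cc:dd:ee:ff, wb_data[0] becomes 0xccddeeff and wb_data[1] becomes
 * 0x0000aabb.
 */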
663
664 /**
665 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
666 *
667 * @bp: device handle
668 * @o: queue for which we want to configure this rule
669 * @add: if true the command is an ADD command, DEL otherwise
670 * @opcode: CLASSIFY_RULE_OPCODE_XXX
671 * @hdr: pointer to a header to setup
672 *
673 */
674 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
675 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
676 struct eth_classify_cmd_header *hdr)
677 {
678 struct bnx2x_raw_obj *raw = &o->raw;
679
680 hdr->client_id = raw->cl_id;
681 hdr->func_id = raw->func_id;
682
683 /* Rx and/or Tx (internal switching) configuration? */
684 hdr->cmd_general_data |=
685 bnx2x_vlan_mac_get_rx_tx_flag(o);
686
687 if (add)
688 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
689
690 hdr->cmd_general_data |=
691 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
692 }
693
694 /**
695 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
696 *
697 * @cid: connection id
698 * @type: BNX2X_FILTER_XXX_PENDING
699 * @hdr: pointer to the header to setup
700 * @rule_cnt:
701 *
702 * Currently we always configure one rule; the echo field is set to contain
703 * a CID and an opcode type.
704 */
705 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
706 struct eth_classify_header *hdr, int rule_cnt)
707 {
708 hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
709 hdr->rule_cnt = (u8)rule_cnt;
710 }
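
/*
 * The echo field set above packs two values: the bits below
 * BNX2X_SWCID_SHIFT carry the SW connection id and the bits above it carry
 * the BNX2X_FILTER_XXX_PENDING type, which lets the completion handler route
 * the CQE back to the right object and command.
 */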
711
712
713 /* hw_config() callbacks */
714 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
715 struct bnx2x_vlan_mac_obj *o,
716 struct bnx2x_exeq_elem *elem, int rule_idx,
717 int cam_offset)
718 {
719 struct bnx2x_raw_obj *raw = &o->raw;
720 struct eth_classify_rules_ramrod_data *data =
721 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
722 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
723 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
724 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
725 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
726 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
727
728 /*
729 * Set LLH CAM entry: currently only iSCSI and ETH macs are
730 * relevant. In addition, current implementation is tuned for a
731 * single ETH MAC.
732 *
733 * When PF configuration of multiple unicast ETH MACs in switch
734 * independent mode is required (NetQ, multiple netdev MACs,
735 * etc.), consider better utilisation of the 8 per-function MAC
736 * entries in the LLH register. There are also the
737 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
738 * total number of CAM entries to 16.
739 *
740 * Currently we won't configure NIG for MACs other than a primary ETH
741 * MAC and iSCSI L2 MAC.
742 *
743 * If this MAC is moving from one Queue to another, no need to change
744 * NIG configuration.
745 */
746 if (cmd != BNX2X_VLAN_MAC_MOVE) {
747 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
748 bnx2x_set_mac_in_nig(bp, add, mac,
749 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
750 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
751 bnx2x_set_mac_in_nig(bp, add, mac,
752 BNX2X_LLH_CAM_ETH_LINE);
753 }
754
755 /* Reset the ramrod data buffer for the first rule */
756 if (rule_idx == 0)
757 memset(data, 0, sizeof(*data));
758
759 /* Setup a command header */
760 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
761 &rule_entry->mac.header);
762
763 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
764 (add ? "add" : "delete"), mac, raw->cl_id);
765
766 /* Set a MAC itself */
767 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
768 &rule_entry->mac.mac_mid,
769 &rule_entry->mac.mac_lsb, mac);
770
771 /* MOVE: Add a rule that will add this MAC to the target Queue */
772 if (cmd == BNX2X_VLAN_MAC_MOVE) {
773 rule_entry++;
774 rule_cnt++;
775
776 /* Setup ramrod data */
777 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
778 elem->cmd_data.vlan_mac.target_obj,
779 true, CLASSIFY_RULE_OPCODE_MAC,
780 &rule_entry->mac.header);
781
782 /* Set a MAC itself */
783 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
784 &rule_entry->mac.mac_mid,
785 &rule_entry->mac.mac_lsb, mac);
786 }
787
788 /* Set the ramrod data header */
789 /* TODO: take this to the higher level in order to prevent multiple
790 writing */
791 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
792 rule_cnt);
793 }
794
795 /**
796 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
797 *
798 * @bp: device handle
799 * @o: queue
800 * @type: BNX2X_FILTER_XXX_PENDING
801 * @cam_offset: offset in cam memory
802 * @hdr: pointer to a header to setup
803 *
804 * E1/E1H
805 */
806 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
807 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
808 struct mac_configuration_hdr *hdr)
809 {
810 struct bnx2x_raw_obj *r = &o->raw;
811
812 hdr->length = 1;
813 hdr->offset = (u8)cam_offset;
814 hdr->client_id = 0xff;
815 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
816 }
817
818 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
819 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
820 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
821 {
822 struct bnx2x_raw_obj *r = &o->raw;
823 u32 cl_bit_vec = (1 << r->cl_id);
824
825 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
826 cfg_entry->pf_id = r->func_id;
827 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
828
829 if (add) {
830 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
831 T_ETH_MAC_COMMAND_SET);
832 SET_FLAG(cfg_entry->flags,
833 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
834
835 /* Set a MAC in a ramrod data */
836 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
837 &cfg_entry->middle_mac_addr,
838 &cfg_entry->lsb_mac_addr, mac);
839 } else
840 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
841 T_ETH_MAC_COMMAND_INVALIDATE);
842 }
843
844 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
845 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
846 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
847 {
848 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
849 struct bnx2x_raw_obj *raw = &o->raw;
850
851 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
852 &config->hdr);
853 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
854 cfg_entry);
855
856 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
857 (add ? "setting" : "clearing"),
858 mac, raw->cl_id, cam_offset);
859 }
860
861 /**
862 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
863 *
864 * @bp: device handle
865 * @o: bnx2x_vlan_mac_obj
866 * @elem: bnx2x_exeq_elem
867 * @rule_idx: rule_idx
868 * @cam_offset: cam_offset
869 */
870 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
871 struct bnx2x_vlan_mac_obj *o,
872 struct bnx2x_exeq_elem *elem, int rule_idx,
873 int cam_offset)
874 {
875 struct bnx2x_raw_obj *raw = &o->raw;
876 struct mac_configuration_cmd *config =
877 (struct mac_configuration_cmd *)(raw->rdata);
878 /*
879 * 57710 and 57711 do not support MOVE command,
880 * so it's either ADD or DEL
881 */
882 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
883 true : false;
884
885 /* Reset the ramrod data buffer */
886 memset(config, 0, sizeof(*config));
887
888 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
889 cam_offset, add,
890 elem->cmd_data.vlan_mac.u.mac.mac, 0,
891 ETH_VLAN_FILTER_ANY_VLAN, config);
892 }
893
894 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
895 struct bnx2x_vlan_mac_obj *o,
896 struct bnx2x_exeq_elem *elem, int rule_idx,
897 int cam_offset)
898 {
899 struct bnx2x_raw_obj *raw = &o->raw;
900 struct eth_classify_rules_ramrod_data *data =
901 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
902 int rule_cnt = rule_idx + 1;
903 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
904 int cmd = elem->cmd_data.vlan_mac.cmd;
905 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
906 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
907
908 /* Reset the ramrod data buffer for the first rule */
909 if (rule_idx == 0)
910 memset(data, 0, sizeof(*data));
911
912 /* Set a rule header */
913 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
914 &rule_entry->vlan.header);
915
916 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
917 vlan);
918
919 /* Set a VLAN itself */
920 rule_entry->vlan.vlan = cpu_to_le16(vlan);
921
922 /* MOVE: Add a rule that will add this MAC to the target Queue */
923 if (cmd == BNX2X_VLAN_MAC_MOVE) {
924 rule_entry++;
925 rule_cnt++;
926
927 /* Setup ramrod data */
928 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
929 elem->cmd_data.vlan_mac.target_obj,
930 true, CLASSIFY_RULE_OPCODE_VLAN,
931 &rule_entry->vlan.header);
932
933 /* Set a VLAN itself */
934 rule_entry->vlan.vlan = cpu_to_le16(vlan);
935 }
936
937 /* Set the ramrod data header */
938 /* TODO: take this to the higher level in order to prevent multiple
939 writing */
940 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
941 rule_cnt);
942 }
943
944 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
945 struct bnx2x_vlan_mac_obj *o,
946 struct bnx2x_exeq_elem *elem,
947 int rule_idx, int cam_offset)
948 {
949 struct bnx2x_raw_obj *raw = &o->raw;
950 struct eth_classify_rules_ramrod_data *data =
951 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
952 int rule_cnt = rule_idx + 1;
953 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
954 int cmd = elem->cmd_data.vlan_mac.cmd;
955 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
956 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
957 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
958
959
960 /* Reset the ramrod data buffer for the first rule */
961 if (rule_idx == 0)
962 memset(data, 0, sizeof(*data));
963
964 /* Set a rule header */
965 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
966 &rule_entry->pair.header);
967
968 /* Set VLAN and MAC themselves */
969 rule_entry->pair.vlan = cpu_to_le16(vlan);
970 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
971 &rule_entry->pair.mac_mid,
972 &rule_entry->pair.mac_lsb, mac);
973
974 /* MOVE: Add a rule that will add this MAC to the target Queue */
975 if (cmd == BNX2X_VLAN_MAC_MOVE) {
976 rule_entry++;
977 rule_cnt++;
978
979 /* Setup ramrod data */
980 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
981 elem->cmd_data.vlan_mac.target_obj,
982 true, CLASSIFY_RULE_OPCODE_PAIR,
983 &rule_entry->pair.header);
984
985 /* Set a VLAN itself */
986 rule_entry->pair.vlan = cpu_to_le16(vlan);
987 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
988 &rule_entry->pair.mac_mid,
989 &rule_entry->pair.mac_lsb, mac);
990 }
991
992 /* Set the ramrod data header */
993 /* TODO: take this to the higher level in order to prevent multiple
994 writing */
995 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
996 rule_cnt);
997 }
998
999 /**
1000 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
1001 *
1002 * @bp: device handle
1003 * @o: bnx2x_vlan_mac_obj
1004 * @elem: bnx2x_exeq_elem
1005 * @rule_idx: rule_idx
1006 * @cam_offset: cam_offset
1007 */
1008 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1009 struct bnx2x_vlan_mac_obj *o,
1010 struct bnx2x_exeq_elem *elem,
1011 int rule_idx, int cam_offset)
1012 {
1013 struct bnx2x_raw_obj *raw = &o->raw;
1014 struct mac_configuration_cmd *config =
1015 (struct mac_configuration_cmd *)(raw->rdata);
1016 /*
1017 * 57710 and 57711 do not support MOVE command,
1018 * so it's either ADD or DEL
1019 */
1020 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1021 true : false;
1022
1023 /* Reset the ramrod data buffer */
1024 memset(config, 0, sizeof(*config));
1025
1026 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1027 cam_offset, add,
1028 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1029 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1030 ETH_VLAN_FILTER_CLASSIFY, config);
1031 }
1032
1033 /**
1034 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1035 *
1036 * @bp: device handle
1037 * @p: command parameters
1038 * @ppos: pointer to the cookie
1039 *
1040 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1041 * previously configured elements list.
1042 *
1043 * from the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
1044 * is taken into account
1045 *
1046 * pointer to the cookie - it should be given back in the next call to make
1047 * the function handle the next element. If *ppos is set to NULL it will
1048 * restart the iterator. If the returned *ppos == NULL this means that the
1049 * last element has been handled.
1050 *
1051 */
1052 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1053 struct bnx2x_vlan_mac_ramrod_params *p,
1054 struct bnx2x_vlan_mac_registry_elem **ppos)
1055 {
1056 struct bnx2x_vlan_mac_registry_elem *pos;
1057 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1058
1059 /* If list is empty - there is nothing to do here */
1060 if (list_empty(&o->head)) {
1061 *ppos = NULL;
1062 return 0;
1063 }
1064
1065 /* make a step... */
1066 if (*ppos == NULL)
1067 *ppos = list_first_entry(&o->head,
1068 struct bnx2x_vlan_mac_registry_elem,
1069 link);
1070 else
1071 *ppos = list_next_entry(*ppos, link);
1072
1073 pos = *ppos;
1074
1075 /* If it's the last step - return NULL */
1076 if (list_is_last(&pos->link, &o->head))
1077 *ppos = NULL;
1078
1079 /* Prepare a 'user_req' */
1080 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1081
1082 /* Set the command */
1083 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1084
1085 /* Set vlan_mac_flags */
1086 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1087
1088 /* Set a restore bit */
1089 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1090
1091 return bnx2x_config_vlan_mac(bp, p);
1092 }
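
/*
 * Typical restore loop (a sketch following the iterator contract described
 * above; 'p' is assumed to be an already prepared ramrod_params structure):
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos != NULL);
 */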
1093
1094 /*
1095 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1096 * pointer to an element matching the given criteria, or NULL if such an
1097 * element hasn't been found.
1098 */
1099 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1100 struct bnx2x_exe_queue_obj *o,
1101 struct bnx2x_exeq_elem *elem)
1102 {
1103 struct bnx2x_exeq_elem *pos;
1104 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1105
1106 /* Check pending for execution commands */
1107 list_for_each_entry(pos, &o->exe_queue, link)
1108 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1109 sizeof(*data)) &&
1110 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1111 return pos;
1112
1113 return NULL;
1114 }
1115
1116 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1117 struct bnx2x_exe_queue_obj *o,
1118 struct bnx2x_exeq_elem *elem)
1119 {
1120 struct bnx2x_exeq_elem *pos;
1121 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1122
1123 /* Check pending for execution commands */
1124 list_for_each_entry(pos, &o->exe_queue, link)
1125 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1126 sizeof(*data)) &&
1127 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1128 return pos;
1129
1130 return NULL;
1131 }
1132
1133 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1134 struct bnx2x_exe_queue_obj *o,
1135 struct bnx2x_exeq_elem *elem)
1136 {
1137 struct bnx2x_exeq_elem *pos;
1138 struct bnx2x_vlan_mac_ramrod_data *data =
1139 &elem->cmd_data.vlan_mac.u.vlan_mac;
1140
1141 /* Check pending for execution commands */
1142 list_for_each_entry(pos, &o->exe_queue, link)
1143 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1144 sizeof(*data)) &&
1145 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1146 return pos;
1147
1148 return NULL;
1149 }
1150
1151 /**
1152 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1153 *
1154 * @bp: device handle
1155 * @qo: bnx2x_qable_obj
1156 * @elem: bnx2x_exeq_elem
1157 *
1158 * Checks that the requested configuration can be added. If yes and if
1159 * requested, consume CAM credit.
1160 *
1161 * The 'validate' is run after the 'optimize'.
1162 *
1163 */
1164 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1165 union bnx2x_qable_obj *qo,
1166 struct bnx2x_exeq_elem *elem)
1167 {
1168 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1169 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1170 int rc;
1171
1172 /* Check the registry */
1173 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1174 if (rc) {
1175 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1176 return rc;
1177 }
1178
1179 /*
1180 * Check if there is a pending ADD command for this
1181 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1182 */
1183 if (exeq->get(exeq, elem)) {
1184 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1185 return -EEXIST;
1186 }
1187
1188 /*
1189 * TODO: Check the pending MOVE from other objects where this
1190 * object is a destination object.
1191 */
1192
1193 /* Consume the credit if not requested not to */
1194 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1195 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1196 o->get_credit(o)))
1197 return -EINVAL;
1198
1199 return 0;
1200 }
1201
1202 /**
1203 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1204 *
1205 * @bp: device handle
1206 * @qo: quable object to check
1207 * @elem: element that needs to be deleted
1208 *
1209 * Checks that the requested configuration can be deleted. If yes and if
1210 * requested, returns a CAM credit.
1211 *
1212 * The 'validate' is run after the 'optimize'.
1213 */
1214 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1215 union bnx2x_qable_obj *qo,
1216 struct bnx2x_exeq_elem *elem)
1217 {
1218 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1219 struct bnx2x_vlan_mac_registry_elem *pos;
1220 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1221 struct bnx2x_exeq_elem query_elem;
1222
1223 /* If this classification can not be deleted (doesn't exist)
1224 * - return -EEXIST.
1225 */
1226 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1227 if (!pos) {
1228 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1229 return -EEXIST;
1230 }
1231
1232 /*
1233 * Check if there are pending DEL or MOVE commands for this
1234 * MAC/VLAN/VLAN-MAC. Return an error if so.
1235 */
1236 memcpy(&query_elem, elem, sizeof(query_elem));
1237
1238 /* Check for MOVE commands */
1239 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1240 if (exeq->get(exeq, &query_elem)) {
1241 BNX2X_ERR("There is a pending MOVE command already\n");
1242 return -EINVAL;
1243 }
1244
1245 /* Check for DEL commands */
1246 if (exeq->get(exeq, elem)) {
1247 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1248 return -EEXIST;
1249 }
1250
1251 /* Return the credit to the credit pool if not requested not to */
1252 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1253 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1254 o->put_credit(o))) {
1255 BNX2X_ERR("Failed to return a credit\n");
1256 return -EINVAL;
1257 }
1258
1259 return 0;
1260 }
1261
1262 /**
1263 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1264 *
1265 * @bp: device handle
1266 * @qo: quable object to check (source)
1267 * @elem: element that needs to be moved
1268 *
1269 * Checks that the requested configuration can be moved. If yes and if
1270 * requested, returns a CAM credit.
1271 *
1272 * The 'validate' is run after the 'optimize'.
1273 */
1274 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1275 union bnx2x_qable_obj *qo,
1276 struct bnx2x_exeq_elem *elem)
1277 {
1278 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1279 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1280 struct bnx2x_exeq_elem query_elem;
1281 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1282 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1283
1284 /*
1285 * Check if we can perform this operation based on the current registry
1286 * state.
1287 */
1288 if (!src_o->check_move(bp, src_o, dest_o,
1289 &elem->cmd_data.vlan_mac.u)) {
1290 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1291 return -EINVAL;
1292 }
1293
1294 /*
1295 * Check if there is an already pending DEL or MOVE command for the
1296 * source object or ADD command for a destination object. Return an
1297 * error if so.
1298 */
1299 memcpy(&query_elem, elem, sizeof(query_elem));
1300
1301 /* Check DEL on source */
1302 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1303 if (src_exeq->get(src_exeq, &query_elem)) {
1304 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1305 return -EINVAL;
1306 }
1307
1308 /* Check MOVE on source */
1309 if (src_exeq->get(src_exeq, elem)) {
1310 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1311 return -EEXIST;
1312 }
1313
1314 /* Check ADD on destination */
1315 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1316 if (dest_exeq->get(dest_exeq, &query_elem)) {
1317 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1318 return -EINVAL;
1319 }
1320
1321 /* Consume the credit if not requested not to */
1322 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1323 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1324 dest_o->get_credit(dest_o)))
1325 return -EINVAL;
1326
1327 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1328 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1329 src_o->put_credit(src_o))) {
1330 /* return the credit taken from dest... */
1331 dest_o->put_credit(dest_o);
1332 return -EINVAL;
1333 }
1334
1335 return 0;
1336 }
1337
1338 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1339 union bnx2x_qable_obj *qo,
1340 struct bnx2x_exeq_elem *elem)
1341 {
1342 switch (elem->cmd_data.vlan_mac.cmd) {
1343 case BNX2X_VLAN_MAC_ADD:
1344 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1345 case BNX2X_VLAN_MAC_DEL:
1346 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1347 case BNX2X_VLAN_MAC_MOVE:
1348 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1349 default:
1350 return -EINVAL;
1351 }
1352 }
1353
1354 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1355 union bnx2x_qable_obj *qo,
1356 struct bnx2x_exeq_elem *elem)
1357 {
1358 int rc = 0;
1359
1360 /* If consumption wasn't required, nothing to do */
1361 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1362 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1363 return 0;
1364
1365 switch (elem->cmd_data.vlan_mac.cmd) {
1366 case BNX2X_VLAN_MAC_ADD:
1367 case BNX2X_VLAN_MAC_MOVE:
1368 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1369 break;
1370 case BNX2X_VLAN_MAC_DEL:
1371 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1372 break;
1373 default:
1374 return -EINVAL;
1375 }
1376
1377 if (rc != true)
1378 return -EINVAL;
1379
1380 return 0;
1381 }
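
/*
 * Credit accounting in bnx2x_remove_vlan_mac() above mirrors 'validate':
 * removing a pending ADD or MOVE returns the CAM credit it consumed
 * (put_credit), while removing a pending DEL re-takes the credit that the
 * DEL had already returned to the pool (get_credit).
 */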
1382
1383 /**
1384 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1385 *
1386 * @bp: device handle
1387 * @o: bnx2x_vlan_mac_obj
1388 *
1389 */
1390 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1391 struct bnx2x_vlan_mac_obj *o)
1392 {
1393 int cnt = 5000, rc;
1394 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1395 struct bnx2x_raw_obj *raw = &o->raw;
1396
1397 while (cnt--) {
1398 /* Wait for the current command to complete */
1399 rc = raw->wait_comp(bp, raw);
1400 if (rc)
1401 return rc;
1402
1403 /* Wait until there are no pending commands */
1404 if (!bnx2x_exe_queue_empty(exeq))
1405 usleep_range(1000, 1000);
1406 else
1407 return 0;
1408 }
1409
1410 return -EBUSY;
1411 }
1412
1413 /**
1414 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1415 *
1416 * @bp: device handle
1417 * @o: bnx2x_vlan_mac_obj
1418 * @cqe: completion element
1419 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1420 *
1421 */
1422 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1423 struct bnx2x_vlan_mac_obj *o,
1424 union event_ring_elem *cqe,
1425 unsigned long *ramrod_flags)
1426 {
1427 struct bnx2x_raw_obj *r = &o->raw;
1428 int rc;
1429
1430 /* Reset pending list */
1431 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1432
1433 /* Clear pending */
1434 r->clear_pending(r);
1435
1436 /* If ramrod failed this is most likely a SW bug */
1437 if (cqe->message.error)
1438 return -EINVAL;
1439
1440 /* Run the next bulk of pending commands if requested */
1441 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1442 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1443 if (rc < 0)
1444 return rc;
1445 }
1446
1447 /* If there is more work to do return PENDING */
1448 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1449 return 1;
1450
1451 return 0;
1452 }
1453
1454 /**
1455 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1456 *
1457 * @bp: device handle
1458 * @qo: bnx2x_qable_obj
1459 * @elem: bnx2x_exeq_elem
1460 */
1461 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1462 union bnx2x_qable_obj *qo,
1463 struct bnx2x_exeq_elem *elem)
1464 {
1465 struct bnx2x_exeq_elem query, *pos;
1466 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1467 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1468
1469 memcpy(&query, elem, sizeof(query));
1470
1471 switch (elem->cmd_data.vlan_mac.cmd) {
1472 case BNX2X_VLAN_MAC_ADD:
1473 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1474 break;
1475 case BNX2X_VLAN_MAC_DEL:
1476 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1477 break;
1478 default:
1479 /* Don't handle anything other than ADD or DEL */
1480 return 0;
1481 }
1482
1483 /* If we found the appropriate element - delete it */
1484 pos = exeq->get(exeq, &query);
1485 if (pos) {
1486
1487 /* Return the credit of the optimized command */
1488 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1489 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1490 if ((query.cmd_data.vlan_mac.cmd ==
1491 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1492 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1493 return -EINVAL;
1494 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1495 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1496 return -EINVAL;
1497 }
1498 }
1499
1500 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1501 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1502 "ADD" : "DEL");
1503
1504 list_del(&pos->link);
1505 bnx2x_exe_queue_free_elem(bp, pos);
1506 return 1;
1507 }
1508
1509 return 0;
1510 }
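
/*
 * Optimization example: if an ADD for some MAC is still waiting in the
 * execution queue when a DEL for the same MAC arrives (or vice versa), the
 * two commands cancel each other - the pending one is removed, its credit is
 * settled, and nothing is ever sent to the FW.
 */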
1511
1512 /**
1513 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1514 *
1515 * @bp: device handle
1516 * @o: vlan_mac object the command belongs to
1517 * @elem: execution queue element holding the command
1518 * @restore: true if this is a restore flow
1519 * @re: output - the prepared registry element
1520 *
1521 * prepare a registry element according to the current command request.
1522 */
1523 static inline int bnx2x_vlan_mac_get_registry_elem(
1524 struct bnx2x *bp,
1525 struct bnx2x_vlan_mac_obj *o,
1526 struct bnx2x_exeq_elem *elem,
1527 bool restore,
1528 struct bnx2x_vlan_mac_registry_elem **re)
1529 {
1530 int cmd = elem->cmd_data.vlan_mac.cmd;
1531 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1532
1533 /* Allocate a new registry element if needed. */
1534 if (!restore &&
1535 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1536 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1537 if (!reg_elem)
1538 return -ENOMEM;
1539
1540 /* Get a new CAM offset */
1541 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1542 /*
1543 * This should never happen, because we have checked the
1544 * CAM availability in the 'validate'.
1545 */
1546 WARN_ON(1);
1547 kfree(reg_elem);
1548 return -EINVAL;
1549 }
1550
1551 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1552
1553 /* Set a VLAN-MAC data */
1554 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1555 sizeof(reg_elem->u));
1556
1557 /* Copy the flags (needed for DEL and RESTORE flows) */
1558 reg_elem->vlan_mac_flags =
1559 elem->cmd_data.vlan_mac.vlan_mac_flags;
1560 } else /* DEL, RESTORE */
1561 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1562
1563 *re = reg_elem;
1564 return 0;
1565 }
1566
1567 /**
1568 * bnx2x_execute_vlan_mac - execute vlan mac command
1569 *
1570 * @bp: device handle
1571 * @qo: queueable object (a vlan_mac one is expected here)
1572 * @exe_chunk: chunk of commands to send in one ramrod
1573 * @ramrod_flags: execution flags
1574 *
1575 * go and send a ramrod!
1576 */
1577 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1578 union bnx2x_qable_obj *qo,
1579 struct list_head *exe_chunk,
1580 unsigned long *ramrod_flags)
1581 {
1582 struct bnx2x_exeq_elem *elem;
1583 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1584 struct bnx2x_raw_obj *r = &o->raw;
1585 int rc, idx = 0;
1586 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1587 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1588 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1589 int cmd;
1590
1591 /*
1592 * If DRIVER_ONLY execution is requested, cleanup a registry
1593 * and exit. Otherwise send a ramrod to FW.
1594 */
1595 if (!drv_only) {
1596 WARN_ON(r->check_pending(r));
1597
1598 /* Set pending */
1599 r->set_pending(r);
1600
1601 /* Fill the ramrod data */
1602 list_for_each_entry(elem, exe_chunk, link) {
1603 cmd = elem->cmd_data.vlan_mac.cmd;
1604 /*
1605 * We will add to the target object in MOVE command, so
1606 * change the object for a CAM search.
1607 */
1608 if (cmd == BNX2X_VLAN_MAC_MOVE)
1609 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1610 else
1611 cam_obj = o;
1612
1613 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1614 elem, restore,
1615 &reg_elem);
1616 if (rc)
1617 goto error_exit;
1618
1619 WARN_ON(!reg_elem);
1620
1621 /* Push a new entry into the registry */
1622 if (!restore &&
1623 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1624 (cmd == BNX2X_VLAN_MAC_MOVE)))
1625 list_add(&reg_elem->link, &cam_obj->head);
1626
1627 /* Configure a single command in a ramrod data buffer */
1628 o->set_one_rule(bp, o, elem, idx,
1629 reg_elem->cam_offset);
1630
1631 /* MOVE command consumes 2 entries in the ramrod data */
1632 if (cmd == BNX2X_VLAN_MAC_MOVE)
1633 idx += 2;
1634 else
1635 idx++;
1636 }
1637
1638 /*
1639 * No need for an explicit memory barrier here as long as we would
1640 * need to ensure the ordering of writing to the SPQ element
1641 * and updating of the SPQ producer which involves a memory
1642 * read and we will have to put a full memory barrier there
1643 * (inside bnx2x_sp_post()).
1644 */
1645
1646 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1647 U64_HI(r->rdata_mapping),
1648 U64_LO(r->rdata_mapping),
1649 ETH_CONNECTION_TYPE);
1650 if (rc)
1651 goto error_exit;
1652 }
1653
1654 /* Now, when we are done with the ramrod - clean up the registry */
1655 list_for_each_entry(elem, exe_chunk, link) {
1656 cmd = elem->cmd_data.vlan_mac.cmd;
1657 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1658 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1659 reg_elem = o->check_del(bp, o,
1660 &elem->cmd_data.vlan_mac.u);
1661
1662 WARN_ON(!reg_elem);
1663
1664 o->put_cam_offset(o, reg_elem->cam_offset);
1665 list_del(&reg_elem->link);
1666 kfree(reg_elem);
1667 }
1668 }
1669
1670 if (!drv_only)
1671 return 1;
1672 else
1673 return 0;
1674
1675 error_exit:
1676 r->clear_pending(r);
1677
1678 /* Cleanup a registry in case of a failure */
1679 list_for_each_entry(elem, exe_chunk, link) {
1680 cmd = elem->cmd_data.vlan_mac.cmd;
1681
1682 if (cmd == BNX2X_VLAN_MAC_MOVE)
1683 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1684 else
1685 cam_obj = o;
1686
1687 /* Delete all newly added above entries */
1688 if (!restore &&
1689 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1690 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1691 reg_elem = o->check_del(bp, cam_obj,
1692 &elem->cmd_data.vlan_mac.u);
1693 if (reg_elem) {
1694 list_del(&reg_elem->link);
1695 kfree(reg_elem);
1696 }
1697 }
1698 }
1699
1700 return rc;
1701 }
1702
1703 static inline int bnx2x_vlan_mac_push_new_cmd(
1704 struct bnx2x *bp,
1705 struct bnx2x_vlan_mac_ramrod_params *p)
1706 {
1707 struct bnx2x_exeq_elem *elem;
1708 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1709 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1710
1711 /* Allocate the execution queue element */
1712 elem = bnx2x_exe_queue_alloc_elem(bp);
1713 if (!elem)
1714 return -ENOMEM;
1715
1716 /* Set the command 'length' */
1717 switch (p->user_req.cmd) {
1718 case BNX2X_VLAN_MAC_MOVE:
1719 elem->cmd_len = 2;
1720 break;
1721 default:
1722 elem->cmd_len = 1;
1723 }
1724
1725 /* Fill the object specific info */
1726 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1727
1728 /* Try to add a new command to the pending list */
1729 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1730 }
1731
1732 /**
1733 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1734 *
1735 * @bp: device handle
1736 * @p: command parameters
1737 *
1738 */
1739 int bnx2x_config_vlan_mac(
1740 struct bnx2x *bp,
1741 struct bnx2x_vlan_mac_ramrod_params *p)
1742 {
1743 int rc = 0;
1744 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1745 unsigned long *ramrod_flags = &p->ramrod_flags;
1746 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1747 struct bnx2x_raw_obj *raw = &o->raw;
1748
1749 /*
1750 * Add new elements to the execution list for commands that require it.
1751 */
1752 if (!cont) {
1753 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1754 if (rc)
1755 return rc;
1756 }
1757
1758 /*
1759 * If nothing will be executed further in this iteration we want to
1760 * return PENDING if there are pending commands
1761 */
1762 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1763 rc = 1;
1764
1765 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1766 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1767 raw->clear_pending(raw);
1768 }
1769
1770 /* Execute commands if required */
1771 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1772 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1773 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1774 if (rc < 0)
1775 return rc;
1776 }
1777
1778 /*
1779 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1780 * then the user wants to wait until the last command is done.
1781 */
1782 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1783 /*
1784 * Wait at most as many iterations as the current exe_queue length, plus
1785 * one (for the currently pending command).
1786 */
1787 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1788
1789 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1790 max_iterations--) {
1791
1792 /* Wait for the current command to complete */
1793 rc = raw->wait_comp(bp, raw);
1794 if (rc)
1795 return rc;
1796
1797 /* Make a next step */
1798 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1799 ramrod_flags);
1800 if (rc < 0)
1801 return rc;
1802 }
1803
1804 return 0;
1805 }
1806
1807 return rc;
1808 }
1809
1810
1811
1812 /**
1813 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1814 *
1815 * @bp: device handle
1816 * @o: vlan/mac object to delete the elements from
1817 * @vlan_mac_flags: flags identifying the elements to delete
1818 * @ramrod_flags: execution flags to be used for this deletion
1819 *
1820 * Returns 0 if the last operation has completed successfully and there are no
1821 * more elements left, positive value if the last operation has completed
1822 * successfully and there are more previously configured elements, negative
1823 * value if the current operation has failed.
1824 */
1825 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1826 struct bnx2x_vlan_mac_obj *o,
1827 unsigned long *vlan_mac_flags,
1828 unsigned long *ramrod_flags)
1829 {
1830 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1831 int rc = 0;
1832 struct bnx2x_vlan_mac_ramrod_params p;
1833 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1834 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1835
1836 /* Clear pending commands first */
1837
1838 spin_lock_bh(&exeq->lock);
1839
1840 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1841 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1842 *vlan_mac_flags) {
1843 rc = exeq->remove(bp, exeq->owner, exeq_pos);
1844 if (rc) {
1845 BNX2X_ERR("Failed to remove command\n");
1846 spin_unlock_bh(&exeq->lock);
1847 return rc;
1848 }
1849 list_del(&exeq_pos->link);
1850 }
1851 }
1852
1853 spin_unlock_bh(&exeq->lock);
1854
1855 /* Prepare a command request */
1856 memset(&p, 0, sizeof(p));
1857 p.vlan_mac_obj = o;
1858 p.ramrod_flags = *ramrod_flags;
1859 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1860
1861 /*
1862 * Add all but the last VLAN-MAC to the execution queue without actually
1863 * executing anything.
1864 */
1865 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1866 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1867 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1868
1869 list_for_each_entry(pos, &o->head, link) {
1870 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1871 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1872 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1873 rc = bnx2x_config_vlan_mac(bp, &p);
1874 if (rc < 0) {
1875 BNX2X_ERR("Failed to add a new DEL command\n");
1876 return rc;
1877 }
1878 }
1879 }
1880
1881 p.ramrod_flags = *ramrod_flags;
1882 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1883
1884 return bnx2x_config_vlan_mac(bp, &p);
1885 }
1886
1887 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1888 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1889 unsigned long *pstate, bnx2x_obj_type type)
1890 {
1891 raw->func_id = func_id;
1892 raw->cid = cid;
1893 raw->cl_id = cl_id;
1894 raw->rdata = rdata;
1895 raw->rdata_mapping = rdata_mapping;
1896 raw->state = state;
1897 raw->pstate = pstate;
1898 raw->obj_type = type;
1899 raw->check_pending = bnx2x_raw_check_pending;
1900 raw->clear_pending = bnx2x_raw_clear_pending;
1901 raw->set_pending = bnx2x_raw_set_pending;
1902 raw->wait_comp = bnx2x_raw_wait;
1903 }
1904
1905 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1906 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1907 int state, unsigned long *pstate, bnx2x_obj_type type,
1908 struct bnx2x_credit_pool_obj *macs_pool,
1909 struct bnx2x_credit_pool_obj *vlans_pool)
1910 {
1911 INIT_LIST_HEAD(&o->head);
1912
1913 o->macs_pool = macs_pool;
1914 o->vlans_pool = vlans_pool;
1915
1916 o->delete_all = bnx2x_vlan_mac_del_all;
1917 o->restore = bnx2x_vlan_mac_restore;
1918 o->complete = bnx2x_complete_vlan_mac;
1919 o->wait = bnx2x_wait_vlan_mac;
1920
1921 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1922 state, pstate, type);
1923 }
1924
1925
1926 void bnx2x_init_mac_obj(struct bnx2x *bp,
1927 struct bnx2x_vlan_mac_obj *mac_obj,
1928 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1929 dma_addr_t rdata_mapping, int state,
1930 unsigned long *pstate, bnx2x_obj_type type,
1931 struct bnx2x_credit_pool_obj *macs_pool)
1932 {
1933 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1934
1935 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1936 rdata_mapping, state, pstate, type,
1937 macs_pool, NULL);
1938
1939 /* CAM credit pool handling */
1940 mac_obj->get_credit = bnx2x_get_credit_mac;
1941 mac_obj->put_credit = bnx2x_put_credit_mac;
1942 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1943 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1944
1945 if (CHIP_IS_E1x(bp)) {
1946 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1947 mac_obj->check_del = bnx2x_check_mac_del;
1948 mac_obj->check_add = bnx2x_check_mac_add;
1949 mac_obj->check_move = bnx2x_check_move_always_err;
1950 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1951
1952 /* Exe Queue */
1953 bnx2x_exe_queue_init(bp,
1954 &mac_obj->exe_queue, 1, qable_obj,
1955 bnx2x_validate_vlan_mac,
1956 bnx2x_remove_vlan_mac,
1957 bnx2x_optimize_vlan_mac,
1958 bnx2x_execute_vlan_mac,
1959 bnx2x_exeq_get_mac);
1960 } else {
1961 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1962 mac_obj->check_del = bnx2x_check_mac_del;
1963 mac_obj->check_add = bnx2x_check_mac_add;
1964 mac_obj->check_move = bnx2x_check_move;
1965 mac_obj->ramrod_cmd =
1966 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1967 mac_obj->get_n_elements = bnx2x_get_n_elements;
1968
1969 /* Exe Queue */
1970 bnx2x_exe_queue_init(bp,
1971 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1972 qable_obj, bnx2x_validate_vlan_mac,
1973 bnx2x_remove_vlan_mac,
1974 bnx2x_optimize_vlan_mac,
1975 bnx2x_execute_vlan_mac,
1976 bnx2x_exeq_get_mac);
1977 }
1978 }
1979
1980 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1981 struct bnx2x_vlan_mac_obj *vlan_obj,
1982 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1983 dma_addr_t rdata_mapping, int state,
1984 unsigned long *pstate, bnx2x_obj_type type,
1985 struct bnx2x_credit_pool_obj *vlans_pool)
1986 {
1987 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1988
1989 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1990 rdata_mapping, state, pstate, type, NULL,
1991 vlans_pool);
1992
1993 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1994 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1995 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1996 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1997
1998 if (CHIP_IS_E1x(bp)) {
1999 BNX2X_ERR("Do not support chips older than E2\n");
2000 BUG();
2001 } else {
2002 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2003 vlan_obj->check_del = bnx2x_check_vlan_del;
2004 vlan_obj->check_add = bnx2x_check_vlan_add;
2005 vlan_obj->check_move = bnx2x_check_move;
2006 vlan_obj->ramrod_cmd =
2007 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2008
2009 /* Exe Queue */
2010 bnx2x_exe_queue_init(bp,
2011 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2012 qable_obj, bnx2x_validate_vlan_mac,
2013 bnx2x_remove_vlan_mac,
2014 bnx2x_optimize_vlan_mac,
2015 bnx2x_execute_vlan_mac,
2016 bnx2x_exeq_get_vlan);
2017 }
2018 }
2019
2020 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2021 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2022 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2023 dma_addr_t rdata_mapping, int state,
2024 unsigned long *pstate, bnx2x_obj_type type,
2025 struct bnx2x_credit_pool_obj *macs_pool,
2026 struct bnx2x_credit_pool_obj *vlans_pool)
2027 {
2028 union bnx2x_qable_obj *qable_obj =
2029 (union bnx2x_qable_obj *)vlan_mac_obj;
2030
2031 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2032 rdata_mapping, state, pstate, type,
2033 macs_pool, vlans_pool);
2034
2035 /* CAM pool handling */
2036 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2037 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2038 /*
2039 * CAM offset is relevant for 57710 and 57711 chips only which have a
2040 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2041 * will be taken from MACs' pool object only.
2042 */
2043 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2044 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2045
2046 if (CHIP_IS_E1(bp)) {
2047 BNX2X_ERR("Do not support chips older than E1H\n");
2048 BUG();
2049 } else if (CHIP_IS_E1H(bp)) {
2050 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2051 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2052 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2053 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2054 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2055
2056 /* Exe Queue */
2057 bnx2x_exe_queue_init(bp,
2058 &vlan_mac_obj->exe_queue, 1, qable_obj,
2059 bnx2x_validate_vlan_mac,
2060 bnx2x_remove_vlan_mac,
2061 bnx2x_optimize_vlan_mac,
2062 bnx2x_execute_vlan_mac,
2063 bnx2x_exeq_get_vlan_mac);
2064 } else {
2065 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2066 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2067 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2068 vlan_mac_obj->check_move = bnx2x_check_move;
2069 vlan_mac_obj->ramrod_cmd =
2070 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2071
2072 /* Exe Queue */
2073 bnx2x_exe_queue_init(bp,
2074 &vlan_mac_obj->exe_queue,
2075 CLASSIFY_RULES_COUNT,
2076 qable_obj, bnx2x_validate_vlan_mac,
2077 bnx2x_remove_vlan_mac,
2078 bnx2x_optimize_vlan_mac,
2079 bnx2x_execute_vlan_mac,
2080 bnx2x_exeq_get_vlan_mac);
2081 }
2082
2083 }
2084
2085 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2086 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2087 struct tstorm_eth_mac_filter_config *mac_filters,
2088 u16 pf_id)
2089 {
2090 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2091
2092 u32 addr = BAR_TSTRORM_INTMEM +
2093 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2094
2095 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2096 }
2097
2098 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2099 struct bnx2x_rx_mode_ramrod_params *p)
2100 {
2101 /* update the bp MAC filter structure */
2102 u32 mask = (1 << p->cl_id);
2103
2104 struct tstorm_eth_mac_filter_config *mac_filters =
2105 (struct tstorm_eth_mac_filter_config *)p->rdata;
2106
2107 /* initial setting is drop-all */
2108 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2109 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2110 u8 unmatched_unicast = 0;
2111
2112 /* On E1x we only take the Rx accept flags into account since Tx switching
2113 * isn't enabled. */
2114 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2115 /* accept matched ucast */
2116 drop_all_ucast = 0;
2117
2118 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2119 /* accept matched mcast */
2120 drop_all_mcast = 0;
2121
2122 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2123 /* accept all ucast */
2124 drop_all_ucast = 0;
2125 accp_all_ucast = 1;
2126 }
2127 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2128 /* accept all mcast */
2129 drop_all_mcast = 0;
2130 accp_all_mcast = 1;
2131 }
2132 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2133 /* accept (all) bcast */
2134 accp_all_bcast = 1;
2135 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2136 /* accept unmatched unicasts */
2137 unmatched_unicast = 1;
2138
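/* Each filter word below holds one bit per client; the ternaries set
 * this client's bit (mask) when the corresponding policy was requested
 * above and clear it otherwise, leaving other clients' bits intact.
 */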
2139 mac_filters->ucast_drop_all = drop_all_ucast ?
2140 mac_filters->ucast_drop_all | mask :
2141 mac_filters->ucast_drop_all & ~mask;
2142
2143 mac_filters->mcast_drop_all = drop_all_mcast ?
2144 mac_filters->mcast_drop_all | mask :
2145 mac_filters->mcast_drop_all & ~mask;
2146
2147 mac_filters->ucast_accept_all = accp_all_ucast ?
2148 mac_filters->ucast_accept_all | mask :
2149 mac_filters->ucast_accept_all & ~mask;
2150
2151 mac_filters->mcast_accept_all = accp_all_mcast ?
2152 mac_filters->mcast_accept_all | mask :
2153 mac_filters->mcast_accept_all & ~mask;
2154
2155 mac_filters->bcast_accept_all = accp_all_bcast ?
2156 mac_filters->bcast_accept_all | mask :
2157 mac_filters->bcast_accept_all & ~mask;
2158
2159 mac_filters->unmatched_unicast = unmatched_unicast ?
2160 mac_filters->unmatched_unicast | mask :
2161 mac_filters->unmatched_unicast & ~mask;
2162
2163 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2164 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2165 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2166 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2167 mac_filters->bcast_accept_all);
2168
2169 /* write the MAC filter structure */
2170 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2171
2172 /* The operation is completed */
2173 clear_bit(p->state, p->pstate);
2174 smp_mb__after_clear_bit();
2175
2176 return 0;
2177 }
2178
2179 /* Setup ramrod data */
2180 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2181 struct eth_classify_header *hdr,
2182 u8 rule_cnt)
2183 {
2184 hdr->echo = cid;
2185 hdr->rule_cnt = rule_cnt;
2186 }
2187
2188 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2189 unsigned long accept_flags,
2190 struct eth_filter_rules_cmd *cmd,
2191 bool clear_accept_all)
2192 {
2193 u16 state;
2194
2195 /* start with 'drop-all' */
2196 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2197 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2198
2199 if (accept_flags) {
2200 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2201 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2202
2203 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2204 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2205
2206 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2207 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2208 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2209 }
2210
2211 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2212 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2213 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2214 }
2215 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2216 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2217
2218 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2219 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2220 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2221 }
2222 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2223 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2224 }
2225
2226 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2227 if (clear_accept_all) {
2228 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2229 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2230 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2231 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2232 }
2233
2234 cmd->state = cpu_to_le16(state);
2235
2236 }
2237
2238 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2239 struct bnx2x_rx_mode_ramrod_params *p)
2240 {
2241 struct eth_filter_rules_ramrod_data *data = p->rdata;
2242 int rc;
2243 u8 rule_idx = 0;
2244
2245 /* Reset the ramrod data buffer */
2246 memset(data, 0, sizeof(*data));
2247
2248 /* Setup ramrod data */
2249
2250 /* Tx (internal switching) */
2251 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2252 data->rules[rule_idx].client_id = p->cl_id;
2253 data->rules[rule_idx].func_id = p->func_id;
2254
2255 data->rules[rule_idx].cmd_general_data =
2256 ETH_FILTER_RULES_CMD_TX_CMD;
2257
2258 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2259 &(data->rules[rule_idx++]), false);
2260 }
2261
2262 /* Rx */
2263 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2264 data->rules[rule_idx].client_id = p->cl_id;
2265 data->rules[rule_idx].func_id = p->func_id;
2266
2267 data->rules[rule_idx].cmd_general_data =
2268 ETH_FILTER_RULES_CMD_RX_CMD;
2269
2270 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2271 &(data->rules[rule_idx++]), false);
2272 }
2273
2274
2275 /*
2276 * If FCoE Queue configuration has been requested, configure the Rx and
2277 * internal switching modes for this queue in separate rules.
2278 *
2279 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2280 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2281 */
2282 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2283 /* Tx (internal switching) */
2284 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2285 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2286 data->rules[rule_idx].func_id = p->func_id;
2287
2288 data->rules[rule_idx].cmd_general_data =
2289 ETH_FILTER_RULES_CMD_TX_CMD;
2290
2291 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2292 &(data->rules[rule_idx++]),
2293 true);
2294 }
2295
2296 /* Rx */
2297 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2298 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2299 data->rules[rule_idx].func_id = p->func_id;
2300
2301 data->rules[rule_idx].cmd_general_data =
2302 ETH_FILTER_RULES_CMD_RX_CMD;
2303
2304 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2305 &(data->rules[rule_idx++]),
2306 true);
2307 }
2308 }
2309
2310 /*
2311 * Set the ramrod header (most importantly - number of rules to
2312 * configure).
2313 */
2314 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2315
2316 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2317 data->header.rule_cnt, p->rx_accept_flags,
2318 p->tx_accept_flags);
2319
2320 /*
2321 * No need for an explicit memory barrier here as long as we
2322 * ensure the ordering of writing to the SPQ element
2323 * and updating of the SPQ producer, which involves a memory
2324 * read; the required full memory barrier is already put there
2325 * (inside bnx2x_sp_post()).
2326 */
2327
2328 /* Send a ramrod */
2329 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2330 U64_HI(p->rdata_mapping),
2331 U64_LO(p->rdata_mapping),
2332 ETH_CONNECTION_TYPE);
2333 if (rc)
2334 return rc;
2335
2336 /* Ramrod completion is pending */
2337 return 1;
2338 }
2339
2340 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2341 struct bnx2x_rx_mode_ramrod_params *p)
2342 {
2343 return bnx2x_state_wait(bp, p->state, p->pstate);
2344 }
2345
2346 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2347 struct bnx2x_rx_mode_ramrod_params *p)
2348 {
2349 /* Do nothing */
2350 return 0;
2351 }
2352
2353 int bnx2x_config_rx_mode(struct bnx2x *bp,
2354 struct bnx2x_rx_mode_ramrod_params *p)
2355 {
2356 int rc;
2357
2358 /* Configure the new classification in the chip */
2359 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2360 if (rc < 0)
2361 return rc;
2362
2363 /* Wait for a ramrod completion if it was requested */
2364 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2365 rc = p->rx_mode_obj->wait_comp(bp, p);
2366 if (rc)
2367 return rc;
2368 }
2369
2370 return rc;
2371 }
2372
2373 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2374 struct bnx2x_rx_mode_obj *o)
2375 {
2376 if (CHIP_IS_E1x(bp)) {
2377 o->wait_comp = bnx2x_empty_rx_mode_wait;
2378 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2379 } else {
2380 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2381 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2382 }
2383 }
2384
2385 /********************* Multicast verbs: SET, CLEAR ****************************/
2386 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2387 {
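/* Bits 31:24 of the little-endian CRC32C of the 6-byte MAC select one
 * of the 256 approximate-match bins.
 */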
2388 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2389 }
2390
2391 struct bnx2x_mcast_mac_elem {
2392 struct list_head link;
2393 u8 mac[ETH_ALEN];
2394 u8 pad[2]; /* For a natural alignment of the following buffer */
2395 };
2396
2397 struct bnx2x_pending_mcast_cmd {
2398 struct list_head link;
2399 int type; /* BNX2X_MCAST_CMD_X */
2400 union {
2401 struct list_head macs_head;
2402 u32 macs_num; /* Needed for DEL command */
2403 int next_bin; /* Needed for RESTORE flow with approx match */
2404 } data;
2405
2406 bool done; /* set to true when the command has been handled. It is
2407 * practically used in 57712 handling only, where one pending
2408 * command may be handled in a few operations. Since for
2409 * other chips every command's handling is completed in a
2410 * single ramrod, there is no need to use this field.
2411 */
2412 };
2413
2414 static int bnx2x_mcast_wait(struct bnx2x *bp,
2415 struct bnx2x_mcast_obj *o)
2416 {
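/* Wait both for the SCHEDULED bit to clear (no more queued commands)
 * and for the raw object's pending bit (ramrod completion).
 */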
2417 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2418 o->raw.wait_comp(bp, &o->raw))
2419 return -EBUSY;
2420
2421 return 0;
2422 }
2423
2424 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2425 struct bnx2x_mcast_obj *o,
2426 struct bnx2x_mcast_ramrod_params *p,
2427 int cmd)
2428 {
2429 int total_sz;
2430 struct bnx2x_pending_mcast_cmd *new_cmd;
2431 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2432 struct bnx2x_mcast_list_elem *pos;
2433 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2434 p->mcast_list_len : 0);
2435
2436 /* If the command is empty ("handle pending commands only"), return */
2437 if (!p->mcast_list_len)
2438 return 0;
2439
2440 total_sz = sizeof(*new_cmd) +
2441 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2442
2443 /* The mcast 'add' path is called under a spin_lock, hence GFP_ATOMIC */
2444 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2445
2446 if (!new_cmd)
2447 return -ENOMEM;
2448
2449 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2450 cmd, macs_list_len);
2451
2452 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2453
2454 new_cmd->type = cmd;
2455 new_cmd->done = false;
2456
2457 switch (cmd) {
2458 case BNX2X_MCAST_CMD_ADD:
2459 cur_mac = (struct bnx2x_mcast_mac_elem *)
2460 ((u8 *)new_cmd + sizeof(*new_cmd));
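/* cur_mac points right past the command header inside the single
 * kzalloc()ed buffer above, so the per-MAC elements are freed
 * together with the command itself.
 */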
2461
2462 /* Push the MACs of the current command into the pending command
2463 * MACs list: FIFO
2464 */
2465 list_for_each_entry(pos, &p->mcast_list, link) {
2466 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2467 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2468 cur_mac++;
2469 }
2470
2471 break;
2472
2473 case BNX2X_MCAST_CMD_DEL:
2474 new_cmd->data.macs_num = p->mcast_list_len;
2475 break;
2476
2477 case BNX2X_MCAST_CMD_RESTORE:
2478 new_cmd->data.next_bin = 0;
2479 break;
2480
2481 default:
2482 BNX2X_ERR("Unknown command: %d\n", cmd);
2483 return -EINVAL;
2484 }
2485
2486 /* Push the new pending command to the tail of the pending list: FIFO */
2487 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2488
2489 o->set_sched(o);
2490
2491 return 1;
2492 }
2493
2494 /**
2495 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2496 *
2497 * @o: multicast object
2498 * @last: index to start looking from (inclusive)
2499 *
2500 * returns the next found (set) bin or a negative value if none is found.
2501 */
2502 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2503 {
2504 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2505
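/* Scan the 64-bit words of the approximate-match vector: within the
 * word containing 'last' start from its bit offset (inner_start), in
 * any subsequent word start from bit 0.
 */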
2506 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2507 if (o->registry.aprox_match.vec[i])
2508 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2509 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2510 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2511 vec, cur_bit)) {
2512 return cur_bit;
2513 }
2514 }
2515 inner_start = 0;
2516 }
2517
2518 /* None found */
2519 return -1;
2520 }
2521
2522 /**
2523 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2524 *
2525 * @o: multicast object
2526 *
2527 * returns the index of the found bin or -1 if none is found
2528 */
2529 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2530 {
2531 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2532
2533 if (cur_bit >= 0)
2534 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2535
2536 return cur_bit;
2537 }
2538
2539 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2540 {
2541 struct bnx2x_raw_obj *raw = &o->raw;
2542 u8 rx_tx_flag = 0;
2543
2544 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2545 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2546 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2547
2548 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2549 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2550 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2551
2552 return rx_tx_flag;
2553 }
2554
2555 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2556 struct bnx2x_mcast_obj *o, int idx,
2557 union bnx2x_mcast_config_data *cfg_data,
2558 int cmd)
2559 {
2560 struct bnx2x_raw_obj *r = &o->raw;
2561 struct eth_multicast_rules_ramrod_data *data =
2562 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2563 u8 func_id = r->func_id;
2564 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2565 int bin;
2566
2567 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2568 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2569
2570 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2571
2572 /* Get a bin and update the bins' vector */
2573 switch (cmd) {
2574 case BNX2X_MCAST_CMD_ADD:
2575 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2576 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2577 break;
2578
2579 case BNX2X_MCAST_CMD_DEL:
2580 /* If there were no more bins to clear
2581 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2582 * clear any (0xff) bin.
2583 * See bnx2x_mcast_validate_e2() for explanation when it may
2584 * happen.
2585 */
2586 bin = bnx2x_mcast_clear_first_bin(o);
2587 break;
2588
2589 case BNX2X_MCAST_CMD_RESTORE:
2590 bin = cfg_data->bin;
2591 break;
2592
2593 default:
2594 BNX2X_ERR("Unknown command: %d\n", cmd);
2595 return;
2596 }
2597
2598 DP(BNX2X_MSG_SP, "%s bin %d\n",
2599 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2600 "Setting" : "Clearing"), bin);
2601
2602 data->rules[idx].bin_id = (u8)bin;
2603 data->rules[idx].func_id = func_id;
2604 data->rules[idx].engine_id = o->engine_id;
2605 }
2606
2607 /**
2608 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2609 *
2610 * @bp: device handle
2611 * @o: multicast object
2612 * @start_bin: index in the registry to start from (including)
2613 * @rdata_idx: index in the ramrod data to start from
2614 *
2615 * returns last handled bin index or -1 if all bins have been handled
2616 */
2617 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2618 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2619 int *rdata_idx)
2620 {
2621 int cur_bin, cnt = *rdata_idx;
2622 union bnx2x_mcast_config_data cfg_data = {0};
2623
2624 /* go through the registry and configure the bins from it */
2625 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2626 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2627
2628 cfg_data.bin = (u8)cur_bin;
2629 o->set_one_rule(bp, o, cnt, &cfg_data,
2630 BNX2X_MCAST_CMD_RESTORE);
2631
2632 cnt++;
2633
2634 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2635
2636 /* Break if we reached the maximum number
2637 * of rules.
2638 */
2639 if (cnt >= o->max_cmd_len)
2640 break;
2641 }
2642
2643 *rdata_idx = cnt;
2644
2645 return cur_bin;
2646 }
2647
2648 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2649 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2650 int *line_idx)
2651 {
2652 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2653 int cnt = *line_idx;
2654 union bnx2x_mcast_config_data cfg_data = {0};
2655
2656 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2657 link) {
2658
2659 cfg_data.mac = &pmac_pos->mac[0];
2660 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2661
2662 cnt++;
2663
2664 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2665 pmac_pos->mac);
2666
2667 list_del(&pmac_pos->link);
2668
2669 /* Break if we reached the maximum number
2670 * of rules.
2671 */
2672 if (cnt >= o->max_cmd_len)
2673 break;
2674 }
2675
2676 *line_idx = cnt;
2677
2678 /* if no more MACs to configure - we are done */
2679 if (list_empty(&cmd_pos->data.macs_head))
2680 cmd_pos->done = true;
2681 }
2682
2683 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2684 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2685 int *line_idx)
2686 {
2687 int cnt = *line_idx;
2688
2689 while (cmd_pos->data.macs_num) {
2690 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2691
2692 cnt++;
2693
2694 cmd_pos->data.macs_num--;
2695
2696 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2697 cmd_pos->data.macs_num, cnt);
2698
2699 /* Break if we reached the maximum
2700 * number of rules.
2701 */
2702 if (cnt >= o->max_cmd_len)
2703 break;
2704 }
2705
2706 *line_idx = cnt;
2707
2708 /* If we cleared all bins - we are done */
2709 if (!cmd_pos->data.macs_num)
2710 cmd_pos->done = true;
2711 }
2712
2713 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2714 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2715 int *line_idx)
2716 {
2717 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2718 line_idx);
2719
2720 if (cmd_pos->data.next_bin < 0)
2721 /* If o->set_restore returned -1 we are done */
2722 cmd_pos->done = true;
2723 else
2724 /* Start from the next bin next time */
2725 cmd_pos->data.next_bin++;
2726 }
2727
2728 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2729 struct bnx2x_mcast_ramrod_params *p)
2730 {
2731 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2732 int cnt = 0;
2733 struct bnx2x_mcast_obj *o = p->mcast_obj;
2734
2735 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2736 link) {
2737 switch (cmd_pos->type) {
2738 case BNX2X_MCAST_CMD_ADD:
2739 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2740 break;
2741
2742 case BNX2X_MCAST_CMD_DEL:
2743 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2744 break;
2745
2746 case BNX2X_MCAST_CMD_RESTORE:
2747 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2748 &cnt);
2749 break;
2750
2751 default:
2752 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2753 return -EINVAL;
2754 }
2755
2756 /* If the command has been completed - remove it from the list
2757 * and free the memory
2758 */
2759 if (cmd_pos->done) {
2760 list_del(&cmd_pos->link);
2761 kfree(cmd_pos);
2762 }
2763
2764 /* Break if we reached the maximum number of rules */
2765 if (cnt >= o->max_cmd_len)
2766 break;
2767 }
2768
2769 return cnt;
2770 }
2771
2772 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2773 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2774 int *line_idx)
2775 {
2776 struct bnx2x_mcast_list_elem *mlist_pos;
2777 union bnx2x_mcast_config_data cfg_data = {0};
2778 int cnt = *line_idx;
2779
2780 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2781 cfg_data.mac = mlist_pos->mac;
2782 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2783
2784 cnt++;
2785
2786 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2787 mlist_pos->mac);
2788 }
2789
2790 *line_idx = cnt;
2791 }
2792
2793 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2794 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2795 int *line_idx)
2796 {
2797 int cnt = *line_idx, i;
2798
2799 for (i = 0; i < p->mcast_list_len; i++) {
2800 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2801
2802 cnt++;
2803
2804 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2805 p->mcast_list_len - i - 1);
2806 }
2807
2808 *line_idx = cnt;
2809 }
2810
2811 /**
2812 * bnx2x_mcast_handle_current_cmd - handle the current (new) command
2813 *
2814 * @bp: device handle
2815 * @p: multicast ramrod parameters
2816 * @cmd: command to handle (BNX2X_MCAST_CMD_X)
2817 * @start_cnt: first line in the ramrod data that may be used
2818 *
2819 * This function is called iff there is enough room for the current command in
2820 * the ramrod data.
2821 * Returns number of lines filled in the ramrod data in total.
2822 */
2823 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2824 struct bnx2x_mcast_ramrod_params *p, int cmd,
2825 int start_cnt)
2826 {
2827 struct bnx2x_mcast_obj *o = p->mcast_obj;
2828 int cnt = start_cnt;
2829
2830 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2831
2832 switch (cmd) {
2833 case BNX2X_MCAST_CMD_ADD:
2834 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2835 break;
2836
2837 case BNX2X_MCAST_CMD_DEL:
2838 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2839 break;
2840
2841 case BNX2X_MCAST_CMD_RESTORE:
2842 o->hdl_restore(bp, o, 0, &cnt);
2843 break;
2844
2845 default:
2846 BNX2X_ERR("Unknown command: %d\n", cmd);
2847 return -EINVAL;
2848 }
2849
2850 /* The current command has been handled */
2851 p->mcast_list_len = 0;
2852
2853 return cnt;
2854 }
2855
2856 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2857 struct bnx2x_mcast_ramrod_params *p,
2858 int cmd)
2859 {
2860 struct bnx2x_mcast_obj *o = p->mcast_obj;
2861 int reg_sz = o->get_registry_size(o);
2862
2863 switch (cmd) {
2864 /* DEL command deletes all currently configured MACs */
2865 case BNX2X_MCAST_CMD_DEL:
2866 o->set_registry_size(o, 0);
2867 /* Don't break */
2868
2869 /* RESTORE command will restore the entire multicast configuration */
2870 case BNX2X_MCAST_CMD_RESTORE:
2871 /* Here we set the approximate amount of work to do, which in
2872 * fact may turn out to be less, as some MACs in postponed ADD
2873 * command(s) scheduled before this command may fall into
2874 * the same bin and the actual number of bins set in the
2875 * registry would be less than we estimated here. See
2876 * bnx2x_mcast_set_one_rule_e2() for further details.
2877 */
2878 p->mcast_list_len = reg_sz;
2879 break;
2880
2881 case BNX2X_MCAST_CMD_ADD:
2882 case BNX2X_MCAST_CMD_CONT:
2883 /* Here we assume that all new MACs will fall into new bins.
2884 * However we will correct the real registry size after we
2885 * handle all pending commands.
2886 */
2887 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2888 break;
2889
2890 default:
2891 BNX2X_ERR("Unknown command: %d\n", cmd);
2892 return -EINVAL;
2893
2894 }
2895
2896 /* Increase the total number of MACs pending to be configured */
2897 o->total_pending_num += p->mcast_list_len;
2898
2899 return 0;
2900 }
2901
2902 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2903 struct bnx2x_mcast_ramrod_params *p,
2904 int old_num_bins)
2905 {
2906 struct bnx2x_mcast_obj *o = p->mcast_obj;
2907
2908 o->set_registry_size(o, old_num_bins);
2909 o->total_pending_num -= p->mcast_list_len;
2910 }
2911
2912 /**
2913 * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
2914 *
2915 * @bp: device handle
2916 * @p: multicast ramrod parameters
2917 * @len: number of rules to handle
2918 */
2919 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2920 struct bnx2x_mcast_ramrod_params *p,
2921 u8 len)
2922 {
2923 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2924 struct eth_multicast_rules_ramrod_data *data =
2925 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2926
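/* The echo field packs the SW connection id together with the
 * pending-command state; the FW echoes it back in the completion so
 * the driver can match the completion to this multicast operation.
 */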
2927 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2928 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2929 data->header.rule_cnt = len;
2930 }
2931
2932 /**
2933 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2934 *
2935 * @bp: device handle
2936 * @o: multicast object
2937 *
2938 * Recalculate the actual number of set bins in the registry using Brian
2939 * Kernighan's algorithm: its complexity is linear in the number of set bins.
2940 *
2941 * returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
2942 */
2943 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2944 struct bnx2x_mcast_obj *o)
2945 {
2946 int i, cnt = 0;
2947 u64 elem;
2948
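/* 'elem &= elem - 1' clears the lowest set bit, so the inner loop
 * runs exactly once per set bin in each 64-bit word.
 */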
2949 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2950 elem = o->registry.aprox_match.vec[i];
2951 for (; elem; cnt++)
2952 elem &= elem - 1;
2953 }
2954
2955 o->set_registry_size(o, cnt);
2956
2957 return 0;
2958 }
2959
2960 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2961 struct bnx2x_mcast_ramrod_params *p,
2962 int cmd)
2963 {
2964 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2965 struct bnx2x_mcast_obj *o = p->mcast_obj;
2966 struct eth_multicast_rules_ramrod_data *data =
2967 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2968 int cnt = 0, rc;
2969
2970 /* Reset the ramrod data buffer */
2971 memset(data, 0, sizeof(*data));
2972
2973 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2974
2975 /* If there are no more pending commands - clear SCHEDULED state */
2976 if (list_empty(&o->pending_cmds_head))
2977 o->clear_sched(o);
2978
2979 /* The below may be true iff there was enough room in ramrod
2980 * data for all pending commands and for the current
2981 * command. Otherwise the current command would have been added
2982 * to the pending commands and p->mcast_list_len would have been
2983 * zeroed.
2984 */
2985 if (p->mcast_list_len > 0)
2986 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2987
2988 /* We've pulled out some MACs - update the total number of
2989 * outstanding.
2990 */
2991 o->total_pending_num -= cnt;
2992
2993 /* send a ramrod */
2994 WARN_ON(o->total_pending_num < 0);
2995 WARN_ON(cnt > o->max_cmd_len);
2996
2997 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2998
2999 /* Update a registry size if there are no more pending operations.
3000 *
3001 * We don't want to change the value of the registry size if there are
3002 * pending operations because we want it to always be equal to the
3003 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3004 * set bins after the last requested operation in order to properly
3005 * evaluate the size of the next DEL/RESTORE operation.
3006 *
3007 * Note that we update the registry itself during command(s) handling
3008 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3009 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3010 * with a limited amount of update commands (per MAC/bin) and we don't
3011 * know in this scope what the actual state of bins configuration is
3012 * going to be after this ramrod.
3013 */
3014 if (!o->total_pending_num)
3015 bnx2x_mcast_refresh_registry_e2(bp, o);
3016
3017 /*
3018 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3019 * RAMROD_PENDING status immediately.
3020 */
3021 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3022 raw->clear_pending(raw);
3023 return 0;
3024 } else {
3025 /*
3026 * No need for an explicit memory barrier here as long as we
3027 * ensure the ordering of writing to the SPQ element
3028 * and updating of the SPQ producer, which involves a memory
3029 * read; the required full memory barrier is already put there
3030 * (inside bnx2x_sp_post()).
3031 */
3032
3033 /* Send a ramrod */
3034 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3035 raw->cid, U64_HI(raw->rdata_mapping),
3036 U64_LO(raw->rdata_mapping),
3037 ETH_CONNECTION_TYPE);
3038 if (rc)
3039 return rc;
3040
3041 /* Ramrod completion is pending */
3042 return 1;
3043 }
3044 }
3045
3046 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3047 struct bnx2x_mcast_ramrod_params *p,
3048 int cmd)
3049 {
3050 /* Mark that there is work to do */
3051 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3052 p->mcast_list_len = 1;
3053
3054 return 0;
3055 }
3056
3057 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3058 struct bnx2x_mcast_ramrod_params *p,
3059 int old_num_bins)
3060 {
3061 /* Do nothing */
3062 }
3063
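/* Each bin maps to one bit of the mc_filter word array: bit >> 5
 * selects the 32-bit word and bit & 0x1f the bit within it, e.g. bin
 * 37 sets bit 5 of filter[1].
 */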
3064 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3065 do { \
3066 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3067 } while (0)
3068
3069 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3070 struct bnx2x_mcast_obj *o,
3071 struct bnx2x_mcast_ramrod_params *p,
3072 u32 *mc_filter)
3073 {
3074 struct bnx2x_mcast_list_elem *mlist_pos;
3075 int bit;
3076
3077 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3078 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3079 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3080
3081 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3082 mlist_pos->mac, bit);
3083
3084 /* bookkeeping... */
3085 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3086 bit);
3087 }
3088 }
3089
3090 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3091 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3092 u32 *mc_filter)
3093 {
3094 int bit;
3095
3096 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3097 bit >= 0;
3098 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3099 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3100 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3101 }
3102 }
3103
3104 /* On 57711 we write the multicast MACs' approximate match
3105 * table directly into the TSTORM's internal RAM, so we don't
3106 * need to resort to any tricks to make it work.
3107 */
3108 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3109 struct bnx2x_mcast_ramrod_params *p,
3110 int cmd)
3111 {
3112 int i;
3113 struct bnx2x_mcast_obj *o = p->mcast_obj;
3114 struct bnx2x_raw_obj *r = &o->raw;
3115
3116 /* If CLEAR_ONLY has been requested - only clear the registry
3117 * and the pending bit; otherwise build and write the new filter.
3118 */
3119 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3120 u32 mc_filter[MC_HASH_SIZE] = {0};
3121
3122 /* Set the multicast filter bits before writing it into
3123 * the internal memory.
3124 */
3125 switch (cmd) {
3126 case BNX2X_MCAST_CMD_ADD:
3127 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3128 break;
3129
3130 case BNX2X_MCAST_CMD_DEL:
3131 DP(BNX2X_MSG_SP,
3132 "Invalidating multicast MACs configuration\n");
3133
3134 /* clear the registry */
3135 memset(o->registry.aprox_match.vec, 0,
3136 sizeof(o->registry.aprox_match.vec));
3137 break;
3138
3139 case BNX2X_MCAST_CMD_RESTORE:
3140 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3141 break;
3142
3143 default:
3144 BNX2X_ERR("Unknown command: %d\n", cmd);
3145 return -EINVAL;
3146 }
3147
3148 /* Set the mcast filter in the internal memory */
3149 for (i = 0; i < MC_HASH_SIZE; i++)
3150 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3151 } else
3152 /* clear the registry */
3153 memset(o->registry.aprox_match.vec, 0,
3154 sizeof(o->registry.aprox_match.vec));
3155
3156 /* We are done */
3157 r->clear_pending(r);
3158
3159 return 0;
3160 }
3161
3162 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3163 struct bnx2x_mcast_ramrod_params *p,
3164 int cmd)
3165 {
3166 struct bnx2x_mcast_obj *o = p->mcast_obj;
3167 int reg_sz = o->get_registry_size(o);
3168
3169 switch (cmd) {
3170 /* DEL command deletes all currently configured MACs */
3171 case BNX2X_MCAST_CMD_DEL:
3172 o->set_registry_size(o, 0);
3173 /* Don't break */
3174
3175 /* RESTORE command will restore the entire multicast configuration */
3176 case BNX2X_MCAST_CMD_RESTORE:
3177 p->mcast_list_len = reg_sz;
3178 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3179 cmd, p->mcast_list_len);
3180 break;
3181
3182 case BNX2X_MCAST_CMD_ADD:
3183 case BNX2X_MCAST_CMD_CONT:
3184 /* Multicast MACs on 57710 are configured as unicast MACs and
3185 * there is only a limited number of CAM entries for that
3186 * matter.
3187 */
3188 if (p->mcast_list_len > o->max_cmd_len) {
3189 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3190 o->max_cmd_len);
3191 return -EINVAL;
3192 }
3193 /* Every configured MAC should be cleared if DEL command is
3194 * called. Only the last ADD command is relevant since
3195 * every ADD command overrides the previous configuration.
3196 */
3197 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3198 if (p->mcast_list_len > 0)
3199 o->set_registry_size(o, p->mcast_list_len);
3200
3201 break;
3202
3203 default:
3204 BNX2X_ERR("Unknown command: %d\n", cmd);
3205 return -EINVAL;
3206
3207 }
3208
3209 /* We want to ensure that commands are executed one by one for 57710.
3210 * Therefore each non-empty command will consume o->max_cmd_len.
3211 */
3212 if (p->mcast_list_len)
3213 o->total_pending_num += o->max_cmd_len;
3214
3215 return 0;
3216 }
3217
3218 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3219 struct bnx2x_mcast_ramrod_params *p,
3220 int old_num_macs)
3221 {
3222 struct bnx2x_mcast_obj *o = p->mcast_obj;
3223
3224 o->set_registry_size(o, old_num_macs);
3225
3226 /* If the current command hasn't been handled yet and we are
3227 * here, it means that it's meant to be dropped and we have to
3228 * update the number of outstanding MACs accordingly.
3229 */
3230 if (p->mcast_list_len)
3231 o->total_pending_num -= o->max_cmd_len;
3232 }
3233
3234 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3235 struct bnx2x_mcast_obj *o, int idx,
3236 union bnx2x_mcast_config_data *cfg_data,
3237 int cmd)
3238 {
3239 struct bnx2x_raw_obj *r = &o->raw;
3240 struct mac_configuration_cmd *data =
3241 (struct mac_configuration_cmd *)(r->rdata);
3242
3243 /* copy mac */
3244 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3245 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3246 &data->config_table[idx].middle_mac_addr,
3247 &data->config_table[idx].lsb_mac_addr,
3248 cfg_data->mac);
3249
3250 data->config_table[idx].vlan_id = 0;
3251 data->config_table[idx].pf_id = r->func_id;
3252 data->config_table[idx].clients_bit_vector =
3253 cpu_to_le32(1 << r->cl_id);
3254
3255 SET_FLAG(data->config_table[idx].flags,
3256 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3257 T_ETH_MAC_COMMAND_SET);
3258 }
3259 }
3260
3261 /**
3262 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3263 *
3264 * @bp: device handle
3265 * @p:
3266 * @len: number of rules to handle
3267 */
3268 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3269 struct bnx2x_mcast_ramrod_params *p,
3270 u8 len)
3271 {
3272 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3273 struct mac_configuration_cmd *data =
3274 (struct mac_configuration_cmd *)(r->rdata);
3275
3276 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3277 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3278 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3279
3280 data->hdr.offset = offset;
3281 data->hdr.client_id = 0xff;
3282 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3283 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3284 data->hdr.length = len;
3285 }
3286
3287 /**
3288 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3289 *
3290 * @bp: device handle
3291 * @o: multicast object
3292 * @start_idx: index in the registry to start from
3293 * @rdata_idx: index in the ramrod data to start from
3294 *
3295 * The restore command for 57710 is like all other commands - always a
3296 * stand-alone command - start_idx and rdata_idx will always be 0. This
3297 * function always succeeds.
3298 * returns -1 to comply with 57712 variant.
3299 */
3300 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3301 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3302 int *rdata_idx)
3303 {
3304 struct bnx2x_mcast_mac_elem *elem;
3305 int i = 0;
3306 union bnx2x_mcast_config_data cfg_data = {0};
3307
3308 /* go through the registry and configure the MACs from it. */
3309 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3310 cfg_data.mac = &elem->mac[0];
3311 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3312
3313 i++;
3314
3315 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3316 cfg_data.mac);
3317 }
3318
3319 *rdata_idx = i;
3320
3321 return -1;
3322 }
3323
3324
3325 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3326 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3327 {
3328 struct bnx2x_pending_mcast_cmd *cmd_pos;
3329 struct bnx2x_mcast_mac_elem *pmac_pos;
3330 struct bnx2x_mcast_obj *o = p->mcast_obj;
3331 union bnx2x_mcast_config_data cfg_data = {0};
3332 int cnt = 0;
3333
3334
3335 /* If nothing to be done - return */
3336 if (list_empty(&o->pending_cmds_head))
3337 return 0;
3338
3339 /* Handle the first command */
3340 cmd_pos = list_first_entry(&o->pending_cmds_head,
3341 struct bnx2x_pending_mcast_cmd, link);
3342
3343 switch (cmd_pos->type) {
3344 case BNX2X_MCAST_CMD_ADD:
3345 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3346 cfg_data.mac = &pmac_pos->mac[0];
3347 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3348
3349 cnt++;
3350
3351 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3352 pmac_pos->mac);
3353 }
3354 break;
3355
3356 case BNX2X_MCAST_CMD_DEL:
3357 cnt = cmd_pos->data.macs_num;
3358 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3359 break;
3360
3361 case BNX2X_MCAST_CMD_RESTORE:
3362 o->hdl_restore(bp, o, 0, &cnt);
3363 break;
3364
3365 default:
3366 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3367 return -EINVAL;
3368 }
3369
3370 list_del(&cmd_pos->link);
3371 kfree(cmd_pos);
3372
3373 return cnt;
3374 }
3375
3376 /**
3377 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3378 *
3379 * @fw_hi: two high MAC-address bytes in firmware (swapped) format
3380 * @fw_mid: two middle MAC-address bytes in firmware (swapped) format
3381 * @fw_lo: two low MAC-address bytes in firmware (swapped) format
3382 * @mac: buffer to receive the MAC address in wire order
3383 */
3384 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3385 __le16 *fw_lo, u8 *mac)
3386 {
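/* Each __le16 firmware field holds two adjacent MAC bytes in swapped
 * order; the assignments below restore the wire-order MAC.
 */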
3387 mac[1] = ((u8 *)fw_hi)[0];
3388 mac[0] = ((u8 *)fw_hi)[1];
3389 mac[3] = ((u8 *)fw_mid)[0];
3390 mac[2] = ((u8 *)fw_mid)[1];
3391 mac[5] = ((u8 *)fw_lo)[0];
3392 mac[4] = ((u8 *)fw_lo)[1];
3393 }
3394
3395 /**
3396 * bnx2x_mcast_refresh_registry_e1 - update the exact-match multicast registry
3397 *
3398 * @bp: device handle
3399 * @o: multicast object
3400 *
3401 * Check the first entry flag of the ramrod data to see if it's a DELETE or ADD
3402 * command and update the registry correspondingly: if ADD - allocate memory and
3403 * add the entries to the registry (list), if DELETE - clear the registry and free
3404 * the memory.
3405 */
3406 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3407 struct bnx2x_mcast_obj *o)
3408 {
3409 struct bnx2x_raw_obj *raw = &o->raw;
3410 struct bnx2x_mcast_mac_elem *elem;
3411 struct mac_configuration_cmd *data =
3412 (struct mac_configuration_cmd *)(raw->rdata);
3413
3414 /* If first entry contains a SET bit - the command was ADD,
3415 * otherwise - DEL_ALL
3416 */
3417 if (GET_FLAG(data->config_table[0].flags,
3418 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3419 int i, len = data->hdr.length;
3420
3421 /* Return if it was a RESTORE command */
3422 if (!list_empty(&o->registry.exact_match.macs))
3423 return 0;
3424
3425 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3426 if (!elem) {
3427 BNX2X_ERR("Failed to allocate registry memory\n");
3428 return -ENOMEM;
3429 }
3430
3431 for (i = 0; i < len; i++, elem++) {
3432 bnx2x_get_fw_mac_addr(
3433 &data->config_table[i].msb_mac_addr,
3434 &data->config_table[i].middle_mac_addr,
3435 &data->config_table[i].lsb_mac_addr,
3436 elem->mac);
3437 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3438 elem->mac);
3439 list_add_tail(&elem->link,
3440 &o->registry.exact_match.macs);
3441 }
3442 } else {
3443 elem = list_first_entry(&o->registry.exact_match.macs,
3444 struct bnx2x_mcast_mac_elem, link);
3445 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3446 kfree(elem);
3447 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3448 }
3449
3450 return 0;
3451 }
3452
3453 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3454 struct bnx2x_mcast_ramrod_params *p,
3455 int cmd)
3456 {
3457 struct bnx2x_mcast_obj *o = p->mcast_obj;
3458 struct bnx2x_raw_obj *raw = &o->raw;
3459 struct mac_configuration_cmd *data =
3460 (struct mac_configuration_cmd *)(raw->rdata);
3461 int cnt = 0, i, rc;
3462
3463 /* Reset the ramrod data buffer */
3464 memset(data, 0, sizeof(*data));
3465
3466 /* First set all entries as invalid */
3467 for (i = 0; i < o->max_cmd_len; i++)
3468 SET_FLAG(data->config_table[i].flags,
3469 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3470 T_ETH_MAC_COMMAND_INVALIDATE);
3471
3472 /* Handle pending commands first */
3473 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3474
3475 /* If there are no more pending commands - clear SCHEDULED state */
3476 if (list_empty(&o->pending_cmds_head))
3477 o->clear_sched(o);
3478
3479 /* The below may be true iff there were no pending commands */
3480 if (!cnt)
3481 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3482
3483 /* For 57710 every command has o->max_cmd_len length to ensure that
3484 * commands are done one at a time.
3485 */
3486 o->total_pending_num -= o->max_cmd_len;
3487
3488 /* send a ramrod */
3489
3490 WARN_ON(cnt > o->max_cmd_len);
3491
3492 /* Set ramrod header (in particular, a number of entries to update) */
3493 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3494
3495 /* Update the registry: we need the registry contents to be always up
3496 * to date in order to be able to execute a RESTORE opcode. Here
3497 * we use the fact that for 57710 we send one command at a time,
3498 * hence we may take the registry update out of the command handling
3499 * and do it in a simpler way here.
3500 */
3501 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3502 if (rc)
3503 return rc;
3504
3505 /*
3506 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3507 * RAMROD_PENDING status immediately.
3508 */
3509 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3510 raw->clear_pending(raw);
3511 return 0;
3512 } else {
3513 /*
3514 * No need for an explicit memory barrier here: the ordering of
3515 * the write to the SPQ element and the update of the SPQ
3516 * producer (which involves a memory read) is guaranteed by the
3517 * full memory barrier inside bnx2x_sp_post(), so nothing more
3518 * is required at this point.
3519 */
3520
3521 /* Send a ramrod */
3522 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3523 U64_HI(raw->rdata_mapping),
3524 U64_LO(raw->rdata_mapping),
3525 ETH_CONNECTION_TYPE);
3526 if (rc)
3527 return rc;
3528
3529 /* Ramrod completion is pending */
3530 return 1;
3531 }
3532
3533 }
3534
3535 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3536 {
3537 return o->registry.exact_match.num_macs_set;
3538 }
3539
3540 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3541 {
3542 return o->registry.aprox_match.num_bins_set;
3543 }
3544
3545 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3546 int n)
3547 {
3548 o->registry.exact_match.num_macs_set = n;
3549 }
3550
3551 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3552 int n)
3553 {
3554 o->registry.aprox_match.num_bins_set = n;
3555 }
3556
3557 int bnx2x_config_mcast(struct bnx2x *bp,
3558 struct bnx2x_mcast_ramrod_params *p,
3559 int cmd)
3560 {
3561 struct bnx2x_mcast_obj *o = p->mcast_obj;
3562 struct bnx2x_raw_obj *r = &o->raw;
3563 int rc = 0, old_reg_size;
3564
3565 /* This is needed to recover number of currently configured mcast macs
3566 * in case of failure.
3567 */
3568 old_reg_size = o->get_registry_size(o);
3569
3570 /* Do some calculations and checks */
3571 rc = o->validate(bp, p, cmd);
3572 if (rc)
3573 return rc;
3574
3575 /* Return if there is no work to do */
3576 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3577 return 0;
3578
3579 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3580 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3581
3582 /* Enqueue the current command to the pending list if we can't complete
3583 * it in the current iteration
3584 */
3585 if (r->check_pending(r) ||
3586 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3587 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3588 if (rc < 0)
3589 goto error_exit1;
3590
3591 /* As long as the current command is in a command list we
3592 * don't need to handle it separately.
3593 */
3594 p->mcast_list_len = 0;
3595 }
3596
3597 if (!r->check_pending(r)) {
3598
3599 /* Set 'pending' state */
3600 r->set_pending(r);
3601
3602 /* Configure the new classification in the chip */
3603 rc = o->config_mcast(bp, p, cmd);
3604 if (rc < 0)
3605 goto error_exit2;
3606
3607 /* Wait for a ramrod completion if was requested */
3608 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3609 rc = o->wait_comp(bp, o);
3610 }
3611
3612 return rc;
3613
3614 error_exit2:
3615 r->clear_pending(r);
3616
3617 error_exit1:
3618 o->revert(bp, p, old_reg_size);
3619
3620 return rc;
3621 }
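
/*
 * Illustration (editor's sketch, not driver code): a typical ADD request,
 * roughly following the driver's rx-mode path. The list 'mc_list' and its
 * length 'mc_num' are hypothetical locals; the entries would be of type
 * struct bnx2x_mcast_list_elem.
 */
#if 0
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &bp->mcast_obj;
	INIT_LIST_HEAD(&rparam.mcast_list);
	list_splice_init(&mc_list, &rparam.mcast_list);	/* hypothetical */
	rparam.mcast_list_len = mc_num;			/* hypothetical */
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
	if (rc < 0)
		BNX2X_ERR("mcast ADD failed: %d\n", rc);
#endif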
3622
3623 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3624 {
3625 smp_mb__before_clear_bit();
3626 clear_bit(o->sched_state, o->raw.pstate);
3627 smp_mb__after_clear_bit();
3628 }
3629
3630 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3631 {
3632 smp_mb__before_clear_bit();
3633 set_bit(o->sched_state, o->raw.pstate);
3634 smp_mb__after_clear_bit();
3635 }
3636
3637 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3638 {
3639 return !!test_bit(o->sched_state, o->raw.pstate);
3640 }
3641
3642 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3643 {
3644 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3645 }
3646
3647 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3648 struct bnx2x_mcast_obj *mcast_obj,
3649 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3650 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3651 int state, unsigned long *pstate, bnx2x_obj_type type)
3652 {
3653 memset(mcast_obj, 0, sizeof(*mcast_obj));
3654
3655 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3656 rdata, rdata_mapping, state, pstate, type);
3657
3658 mcast_obj->engine_id = engine_id;
3659
3660 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3661
3662 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3663 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3664 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3665 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3666
3667 if (CHIP_IS_E1(bp)) {
3668 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3669 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3670 mcast_obj->hdl_restore =
3671 bnx2x_mcast_handle_restore_cmd_e1;
3672 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3673
3674 if (CHIP_REV_IS_SLOW(bp))
3675 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3676 else
3677 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3678
3679 mcast_obj->wait_comp = bnx2x_mcast_wait;
3680 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3681 mcast_obj->validate = bnx2x_mcast_validate_e1;
3682 mcast_obj->revert = bnx2x_mcast_revert_e1;
3683 mcast_obj->get_registry_size =
3684 bnx2x_mcast_get_registry_size_exact;
3685 mcast_obj->set_registry_size =
3686 bnx2x_mcast_set_registry_size_exact;
3687
3688 /* 57710 is the only chip that uses the exact match for mcast
3689 * at the moment.
3690 */
3691 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3692
3693 } else if (CHIP_IS_E1H(bp)) {
3694 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3695 mcast_obj->enqueue_cmd = NULL;
3696 mcast_obj->hdl_restore = NULL;
3697 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3698
3699 /* 57711 doesn't send a ramrod, so it has unlimited credit
3700 * for one command.
3701 */
3702 mcast_obj->max_cmd_len = -1;
3703 mcast_obj->wait_comp = bnx2x_mcast_wait;
3704 mcast_obj->set_one_rule = NULL;
3705 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3706 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3707 mcast_obj->get_registry_size =
3708 bnx2x_mcast_get_registry_size_aprox;
3709 mcast_obj->set_registry_size =
3710 bnx2x_mcast_set_registry_size_aprox;
3711 } else {
3712 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3713 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3714 mcast_obj->hdl_restore =
3715 bnx2x_mcast_handle_restore_cmd_e2;
3716 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3717 /* TODO: There should be a proper HSI define for this number!!!
3718 */
3719 mcast_obj->max_cmd_len = 16;
3720 mcast_obj->wait_comp = bnx2x_mcast_wait;
3721 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3722 mcast_obj->validate = bnx2x_mcast_validate_e2;
3723 mcast_obj->revert = bnx2x_mcast_revert_e2;
3724 mcast_obj->get_registry_size =
3725 bnx2x_mcast_get_registry_size_aprox;
3726 mcast_obj->set_registry_size =
3727 bnx2x_mcast_set_registry_size_aprox;
3728 }
3729 }
3730
3731 /*************************** Credit handling **********************************/
3732
3733 /**
3734 * atomic_add_ifless - add if the result is less than a given value.
3735 *
3736 * @v: pointer of type atomic_t
3737 * @a: the amount to add to v...
3738 * @u: ...if (v + a) is less than u.
3739 *
3740 * returns true if (v + a) was less than u, and false otherwise.
3741 *
3742 */
3743 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3744 {
3745 int c, old;
3746
3747 c = atomic_read(v);
3748 for (;;) {
3749 if (unlikely(c + a >= u))
3750 return false;
3751
3752 old = atomic_cmpxchg((v), c, c + a);
3753 if (likely(old == c))
3754 break;
3755 c = old;
3756 }
3757
3758 return true;
3759 }
3760
3761 /**
3762 * atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3763 *
3764 * @v: pointer of type atomic_t
3765 * @a: the amount to dec from v...
3766 * @u: ...if (v - a) is greater than or equal to u.
3767 *
3768 * returns true if (v - a) was greater than or equal to u, and false
3769 * otherwise.
3770 */
3771 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3772 {
3773 int c, old;
3774
3775 c = atomic_read(v);
3776 for (;;) {
3777 if (unlikely(c - a < u))
3778 return false;
3779
3780 old = atomic_cmpxchg((v), c, c - a);
3781 if (likely(old == c))
3782 break;
3783 c = old;
3784 }
3785
3786 return true;
3787 }
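
/*
 * Illustration (editor's sketch): both helpers above are lock-free
 * cmpxchg loops. For a counter starting at 5 they behave as follows;
 * the numbers are made up for the example.
 */
#if 0
	atomic_t v = ATOMIC_INIT(5);

	__atomic_add_ifless(&v, 2, 8);	/* 5 + 2 < 8  -> true,  v == 7 */
	__atomic_add_ifless(&v, 2, 8);	/* 7 + 2 >= 8 -> false, v == 7 */
	__atomic_dec_ifmoe(&v, 3, 0);	/* 7 - 3 >= 0 -> true,  v == 4 */
	__atomic_dec_ifmoe(&v, 9, 0);	/* 4 - 9 < 0  -> false, v == 4 */
#endif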
3788
3789 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3790 {
3791 bool rc;
3792
3793 smp_mb();
3794 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3795 smp_mb();
3796
3797 return rc;
3798 }
3799
3800 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3801 {
3802 bool rc;
3803
3804 smp_mb();
3805
3806 /* Don't allow a refill if credit + cnt > pool_sz */
3807 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3808
3809 smp_mb();
3810
3811 return rc;
3812 }
3813
3814 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3815 {
3816 int cur_credit;
3817
3818 smp_mb();
3819 cur_credit = atomic_read(&o->credit);
3820
3821 return cur_credit;
3822 }
3823
3824 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3825 int cnt)
3826 {
3827 return true;
3828 }
3829
3830
3831 static bool bnx2x_credit_pool_get_entry(
3832 struct bnx2x_credit_pool_obj *o,
3833 int *offset)
3834 {
3835 int idx, vec, i;
3836
3837 *offset = -1;
3838
3839 /* Find "internal cam-offset" then add to base for this object... */
3840 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3841
3842 /* Skip the current vector if there are no free entries in it */
3843 if (!o->pool_mirror[vec])
3844 continue;
3845
3846 /* If we've got here we are going to find a free entry */
3847 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3848 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3849
3850 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3851 /* Got one!! */
3852 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3853 *offset = o->base_pool_offset + idx;
3854 return true;
3855 }
3856 }
3857
3858 return false;
3859 }
3860
3861 static bool bnx2x_credit_pool_put_entry(
3862 struct bnx2x_credit_pool_obj *o,
3863 int offset)
3864 {
3865 if (offset < o->base_pool_offset)
3866 return false;
3867
3868 offset -= o->base_pool_offset;
3869
3870 if (offset >= o->pool_sz)
3871 return false;
3872
3873 /* Return the entry to the pool */
3874 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3875
3876 return true;
3877 }
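
/*
 * Illustration (editor's sketch): a get_entry/put_entry round trip. The
 * pool hands out an absolute CAM offset (base_pool_offset + bit index)
 * and the pool_mirror bitmap tracks which entries are still free.
 */
#if 0
	int offset;

	if (o->get_entry(o, &offset)) {
		/* offset is now reserved:
		 * base_pool_offset <= offset < base_pool_offset + pool_sz
		 */
		o->put_entry(o, offset);	/* release it when done */
	}
#endif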
3878
3879 static bool bnx2x_credit_pool_put_entry_always_true(
3880 struct bnx2x_credit_pool_obj *o,
3881 int offset)
3882 {
3883 return true;
3884 }
3885
3886 static bool bnx2x_credit_pool_get_entry_always_true(
3887 struct bnx2x_credit_pool_obj *o,
3888 int *offset)
3889 {
3890 *offset = -1;
3891 return true;
3892 }
3893 /**
3894 * bnx2x_init_credit_pool - initialize credit pool internals.
3895 *
3896 * @p: credit pool object
3897 * @base: Base entry in the CAM to use.
3898 * @credit: pool size.
3899 *
3900 * If base is negative no CAM entries handling will be performed.
3901 * If credit is negative pool operations will always succeed (unlimited pool).
3902 *
3903 */
3904 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3905 int base, int credit)
3906 {
3907 /* Zero the object first */
3908 memset(p, 0, sizeof(*p));
3909
3910 /* Set the table to all 1s */
3911 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3912
3913 /* Init a pool as full */
3914 atomic_set(&p->credit, credit);
3915
3916 /* The total pool size */
3917 p->pool_sz = credit;
3918
3919 p->base_pool_offset = base;
3920
3921 /* Commit the change */
3922 smp_mb();
3923
3924 p->check = bnx2x_credit_pool_check;
3925
3926 /* if pool credit is negative - disable the checks */
3927 if (credit >= 0) {
3928 p->put = bnx2x_credit_pool_put;
3929 p->get = bnx2x_credit_pool_get;
3930 p->put_entry = bnx2x_credit_pool_put_entry;
3931 p->get_entry = bnx2x_credit_pool_get_entry;
3932 } else {
3933 p->put = bnx2x_credit_pool_always_true;
3934 p->get = bnx2x_credit_pool_always_true;
3935 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3936 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3937 }
3938
3939 /* If base is negative - disable entries handling */
3940 if (base < 0) {
3941 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3942 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3943 }
3944 }
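
/*
 * Illustration (editor's sketch): the three initialization modes described
 * in the kernel-doc above. The sizes are made up for the example.
 */
#if 0
	bnx2x_init_credit_pool(p, 0, 16);  /* 16 credits, CAM entries 0..15 */
	bnx2x_init_credit_pool(p, -1, 16); /* 16 credits, no CAM entry handling */
	bnx2x_init_credit_pool(p, 0, -1);  /* unlimited: get/put always succeed */
#endif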
3945
3946 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3947 struct bnx2x_credit_pool_obj *p, u8 func_id,
3948 u8 func_num)
3949 {
3950 /* TODO: this will be defined in consts as well... */
3951 #define BNX2X_CAM_SIZE_EMUL 5
3952
3953 int cam_sz;
3954
3955 if (CHIP_IS_E1(bp)) {
3956 /* In E1, Multicast is saved in cam... */
3957 if (!CHIP_REV_IS_SLOW(bp))
3958 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3959 else
3960 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3961
3962 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3963
3964 } else if (CHIP_IS_E1H(bp)) {
3965 /* CAM credit is equally divided between all active functions
3966 * on the PORT.
3967 */
3968 if ((func_num > 0)) {
3969 if (!CHIP_REV_IS_SLOW(bp))
3970 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3971 else
3972 cam_sz = BNX2X_CAM_SIZE_EMUL;
3973 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3974 } else {
3975 /* this should never happen! Block MAC operations. */
3976 bnx2x_init_credit_pool(p, 0, 0);
3977 }
3978
3979 } else {
3980
3981 /*
3982 * CAM credit is equally divided between all active functions
3983 * on the PATH.
3984 */
3985 if ((func_num > 0)) {
3986 if (!CHIP_REV_IS_SLOW(bp))
3987 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3988 else
3989 cam_sz = BNX2X_CAM_SIZE_EMUL;
3990
3991 /*
3992 * No need for CAM entries handling for 57712 and
3993 * newer.
3994 */
3995 bnx2x_init_credit_pool(p, -1, cam_sz);
3996 } else {
3997 /* this should never happen! Block MAC operations. */
3998 bnx2x_init_credit_pool(p, 0, 0);
3999 }
4000
4001 }
4002 }
4003
4004 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4005 struct bnx2x_credit_pool_obj *p,
4006 u8 func_id,
4007 u8 func_num)
4008 {
4009 if (CHIP_IS_E1x(bp)) {
4010 /*
4011 * There is no VLAN credit in HW on 57710 and 57711; only
4012 * MAC / MAC-VLAN can be set
4013 */
4014 bnx2x_init_credit_pool(p, 0, -1);
4015 } else {
4016 /*
4017 * CAM credit is equally divided between all active functions
4018 * on the PATH.
4019 */
4020 if (func_num > 0) {
4021 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4022 bnx2x_init_credit_pool(p, func_id * credit, credit);
4023 } else
4024 /* this should never happen! Block VLAN operations. */
4025 bnx2x_init_credit_pool(p, 0, 0);
4026 }
4027 }
4028
4029 /****************** RSS Configuration ******************/
4030 /**
4031 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4032 *
4033 * @bp: driver handle
4034 * @p: pointer to rss configuration
4035 *
4036 * Prints it when NETIF_MSG_IFUP debug level is configured.
4037 */
4038 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4039 struct bnx2x_config_rss_params *p)
4040 {
4041 int i;
4042
4043 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4044 DP(BNX2X_MSG_SP, "0x0000: ");
4045 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4046 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4047
4048 /* Print 4 bytes in a line */
4049 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4050 (((i + 1) & 0x3) == 0)) {
4051 DP_CONT(BNX2X_MSG_SP, "\n");
4052 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4053 }
4054 }
4055
4056 DP_CONT(BNX2X_MSG_SP, "\n");
4057 }
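
/*
 * Editor's note: with the 4-entries-per-line wrapping above, the debug
 * output looks like the following (the table values are examples only):
 *
 *   0x0000: 0x00 0x01 0x02 0x03
 *   0x0004: 0x00 0x01 0x02 0x03
 *   ...
 */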
4058
4059 /**
4060 * bnx2x_setup_rss - configure RSS
4061 *
4062 * @bp: device handle
4063 * @p: rss configuration
4064 *
4065 * Sends an RSS_UPDATE ramrod to apply the configuration.
4066 */
4067 static int bnx2x_setup_rss(struct bnx2x *bp,
4068 struct bnx2x_config_rss_params *p)
4069 {
4070 struct bnx2x_rss_config_obj *o = p->rss_obj;
4071 struct bnx2x_raw_obj *r = &o->raw;
4072 struct eth_rss_update_ramrod_data *data =
4073 (struct eth_rss_update_ramrod_data *)(r->rdata);
4074 u8 rss_mode = 0;
4075 int rc;
4076
4077 memset(data, 0, sizeof(*data));
4078
4079 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4080
4081 /* Set an echo field */
4082 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4083 (r->state << BNX2X_SWCID_SHIFT);
4084
4085 /* RSS mode */
4086 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4087 rss_mode = ETH_RSS_MODE_DISABLED;
4088 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4089 rss_mode = ETH_RSS_MODE_REGULAR;
4090 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4091 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4092 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4093 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4094 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4095 rss_mode = ETH_RSS_MODE_IP_DSCP;
4096
4097 data->rss_mode = rss_mode;
4098
4099 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4100
4101 /* RSS capabilities */
4102 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4103 data->capabilities |=
4104 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4105
4106 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4109
4110 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4113
4114 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4115 data->capabilities |=
4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4117
4118 /* Hashing mask */
4119 data->rss_result_mask = p->rss_result_mask;
4120
4121 /* RSS engine ID */
4122 data->rss_engine_id = o->engine_id;
4123
4124 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4125
4126 /* Indirection table */
4127 memcpy(data->indirection_table, p->ind_table,
4128 T_ETH_INDIRECTION_TABLE_SIZE);
4129
4130 /* Remember the last configuration */
4131 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4132
4133 /* Print the indirection table */
4134 if (netif_msg_ifup(bp))
4135 bnx2x_debug_print_ind_table(bp, p);
4136
4137 /* RSS keys */
4138 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4139 memcpy(&data->rss_key[0], &p->rss_key[0],
4140 sizeof(data->rss_key));
4141 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4142 }
4143
4144 /*
4145 * No need for an explicit memory barrier here: the ordering of
4146 * the write to the SPQ element and the update of the SPQ
4147 * producer (which involves a memory read) is guaranteed by the
4148 * full memory barrier inside bnx2x_sp_post(), so nothing more
4149 * is required at this point.
4150 */
4151
4152 /* Send a ramrod */
4153 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4154 U64_HI(r->rdata_mapping),
4155 U64_LO(r->rdata_mapping),
4156 ETH_CONNECTION_TYPE);
4157
4158 if (rc < 0)
4159 return rc;
4160
4161 return 1;
4162 }
4163
4164 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4165 u8 *ind_table)
4166 {
4167 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4168 }
4169
4170 int bnx2x_config_rss(struct bnx2x *bp,
4171 struct bnx2x_config_rss_params *p)
4172 {
4173 int rc;
4174 struct bnx2x_rss_config_obj *o = p->rss_obj;
4175 struct bnx2x_raw_obj *r = &o->raw;
4176
4177 /* Do nothing if only driver cleanup was requested */
4178 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4179 return 0;
4180
4181 r->set_pending(r);
4182
4183 rc = o->config_rss(bp, p);
4184 if (rc < 0) {
4185 r->clear_pending(r);
4186 return rc;
4187 }
4188
4189 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4190 rc = r->wait_comp(bp, r);
4191
4192 return rc;
4193 }
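
/*
 * Illustration (editor's sketch, not driver code): a minimal RSS request.
 * The flag selection, the mask value and the use of bp->rss_conf_obj are
 * examples only; the indirection table and key would be filled in before
 * the call.
 */
#if 0
	struct bnx2x_config_rss_params params = {NULL};
	int rc;

	params.rss_obj = &bp->rss_conf_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	params.rss_result_mask = 0x7f;	/* 128-entry hash mask, for example */
	/* fill params.ind_table[] and params.rss_key[] here */

	rc = bnx2x_config_rss(bp, &params);
#endif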
4194
4195
4196 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4197 struct bnx2x_rss_config_obj *rss_obj,
4198 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4199 void *rdata, dma_addr_t rdata_mapping,
4200 int state, unsigned long *pstate,
4201 bnx2x_obj_type type)
4202 {
4203 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4204 rdata_mapping, state, pstate, type);
4205
4206 rss_obj->engine_id = engine_id;
4207 rss_obj->config_rss = bnx2x_setup_rss;
4208 }
4209
4210 /********************** Queue state object ***********************************/
4211
4212 /**
4213 * bnx2x_queue_state_change - perform Queue state change transition
4214 *
4215 * @bp: device handle
4216 * @params: parameters to perform the transition
4217 *
4218 * returns 0 in case of successfully completed transition, negative error
4219 * code in case of failure, positive (EBUSY) value if there is a
4220 * completion that is still pending (possible only if RAMROD_COMP_WAIT
4221 * is not set in params->ramrod_flags for asynchronous commands).
4222 *
4223 */
4224 int bnx2x_queue_state_change(struct bnx2x *bp,
4225 struct bnx2x_queue_state_params *params)
4226 {
4227 struct bnx2x_queue_sp_obj *o = params->q_obj;
4228 int rc, pending_bit;
4229 unsigned long *pending = &o->pending;
4230
4231 /* Check that the requested transition is legal */
4232 if (o->check_transition(bp, o, params))
4233 return -EINVAL;
4234
4235 /* Set "pending" bit */
4236 pending_bit = o->set_pending(o, params);
4237
4238 /* Don't send a command if only driver cleanup was requested */
4239 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4240 o->complete_cmd(bp, o, pending_bit);
4241 else {
4242 /* Send a ramrod */
4243 rc = o->send_cmd(bp, params);
4244 if (rc) {
4245 o->next_state = BNX2X_Q_STATE_MAX;
4246 clear_bit(pending_bit, pending);
4247 smp_mb__after_clear_bit();
4248 return rc;
4249 }
4250
4251 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4252 rc = o->wait_comp(bp, o, pending_bit);
4253 if (rc)
4254 return rc;
4255
4256 return 0;
4257 }
4258 }
4259
4260 return !!test_bit(pending_bit, pending);
4261 }
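
/*
 * Illustration (editor's sketch, not driver code): requesting a single
 * transition. INIT completes without a ramrod (see bnx2x_q_init() below);
 * 'fp' is a hypothetical fastpath pointer owning the queue object.
 */
#if 0
	struct bnx2x_queue_state_params q_params = {NULL};
	int rc;

	q_params.q_obj = &fp->q_obj;	/* hypothetical object location */
	q_params.cmd = BNX2X_Q_CMD_INIT;
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	/* fill q_params.params.init here */

	rc = bnx2x_queue_state_change(bp, &q_params);
#endif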
4262
4263
4264 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4265 struct bnx2x_queue_state_params *params)
4266 {
4267 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4268
4269 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4270 * UPDATE command.
4271 */
4272 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4273 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4274 bit = BNX2X_Q_CMD_UPDATE;
4275 else
4276 bit = cmd;
4277
4278 set_bit(bit, &obj->pending);
4279 return bit;
4280 }
4281
4282 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4283 struct bnx2x_queue_sp_obj *o,
4284 enum bnx2x_queue_cmd cmd)
4285 {
4286 return bnx2x_state_wait(bp, cmd, &o->pending);
4287 }
4288
4289 /**
4290 * bnx2x_queue_comp_cmd - complete the state change command.
4291 *
4292 * @bp: device handle
4293 * @o: queue state object
4294 * @cmd: command that has completed
4295 *
4296 * Checks that the arrived completion is expected.
4297 */
4298 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4299 struct bnx2x_queue_sp_obj *o,
4300 enum bnx2x_queue_cmd cmd)
4301 {
4302 unsigned long cur_pending = o->pending;
4303
4304 if (!test_and_clear_bit(cmd, &cur_pending)) {
4305 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4306 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4307 o->state, cur_pending, o->next_state);
4308 return -EINVAL;
4309 }
4310
4311 if (o->next_tx_only >= o->max_cos)
4312 /* >= because tx only must always be smaller than cos since the
4313 * primary connection supports COS 0
4314 */
4315 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4316 o->next_tx_only, o->max_cos);
4317
4318 DP(BNX2X_MSG_SP,
4319 "Completing command %d for queue %d, setting state to %d\n",
4320 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4321
4322 if (o->next_tx_only) /* print num tx-only if any exist */
4323 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4324 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4325
4326 o->state = o->next_state;
4327 o->num_tx_only = o->next_tx_only;
4328 o->next_state = BNX2X_Q_STATE_MAX;
4329
4330 /* It's important that o->state and o->next_state are
4331 * updated before o->pending.
4332 */
4333 wmb();
4334
4335 clear_bit(cmd, &o->pending);
4336 smp_mb__after_clear_bit();
4337
4338 return 0;
4339 }
4340
4341 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4342 struct bnx2x_queue_state_params *cmd_params,
4343 struct client_init_ramrod_data *data)
4344 {
4345 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4346
4347 /* Rx data */
4348
4349 /* IPv6 TPA supported for E2 and above only */
4350 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4351 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4352 }
4353
4354 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4355 struct bnx2x_queue_sp_obj *o,
4356 struct bnx2x_general_setup_params *params,
4357 struct client_init_general_data *gen_data,
4358 unsigned long *flags)
4359 {
4360 gen_data->client_id = o->cl_id;
4361
4362 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4363 gen_data->statistics_counter_id =
4364 params->stat_id;
4365 gen_data->statistics_en_flg = 1;
4366 gen_data->statistics_zero_flg =
4367 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4368 } else
4369 gen_data->statistics_counter_id =
4370 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4371
4372 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4373 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4374 gen_data->sp_client_id = params->spcl_id;
4375 gen_data->mtu = cpu_to_le16(params->mtu);
4376 gen_data->func_id = o->func_id;
4377
4378
4379 gen_data->cos = params->cos;
4380
4381 gen_data->traffic_type =
4382 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4383 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4384
4385 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4386 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4387 }
4388
4389 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4390 struct bnx2x_txq_setup_params *params,
4391 struct client_init_tx_data *tx_data,
4392 unsigned long *flags)
4393 {
4394 tx_data->enforce_security_flg =
4395 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4396 tx_data->default_vlan =
4397 cpu_to_le16(params->default_vlan);
4398 tx_data->default_vlan_flg =
4399 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4400 tx_data->tx_switching_flg =
4401 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4402 tx_data->anti_spoofing_flg =
4403 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4404 tx_data->tx_status_block_id = params->fw_sb_id;
4405 tx_data->tx_sb_index_number = params->sb_cq_index;
4406 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4407
4408 tx_data->tx_bd_page_base.lo =
4409 cpu_to_le32(U64_LO(params->dscr_map));
4410 tx_data->tx_bd_page_base.hi =
4411 cpu_to_le32(U64_HI(params->dscr_map));
4412
4413 /* Don't configure any Tx switching mode during queue SETUP */
4414 tx_data->state = 0;
4415 }
4416
4417 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4418 struct rxq_pause_params *params,
4419 struct client_init_rx_data *rx_data)
4420 {
4421 /* flow control data */
4422 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4423 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4424 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4425 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4426 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4427 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4428 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4429 }
4430
4431 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4432 struct bnx2x_rxq_setup_params *params,
4433 struct client_init_rx_data *rx_data,
4434 unsigned long *flags)
4435 {
4436 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4437 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4438 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4439 CLIENT_INIT_RX_DATA_TPA_MODE;
4440 rx_data->vmqueue_mode_en_flg = 0;
4441
4442 rx_data->cache_line_alignment_log_size =
4443 params->cache_line_log;
4444 rx_data->enable_dynamic_hc =
4445 test_bit(BNX2X_Q_FLG_DHC, flags);
4446 rx_data->max_sges_for_packet = params->max_sges_pkt;
4447 rx_data->client_qzone_id = params->cl_qzone_id;
4448 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4449
4450 /* Always start in DROP_ALL mode */
4451 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4452 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4453
4454 /* We don't set drop flags */
4455 rx_data->drop_ip_cs_err_flg = 0;
4456 rx_data->drop_tcp_cs_err_flg = 0;
4457 rx_data->drop_ttl0_flg = 0;
4458 rx_data->drop_udp_cs_err_flg = 0;
4459 rx_data->inner_vlan_removal_enable_flg =
4460 test_bit(BNX2X_Q_FLG_VLAN, flags);
4461 rx_data->outer_vlan_removal_enable_flg =
4462 test_bit(BNX2X_Q_FLG_OV, flags);
4463 rx_data->status_block_id = params->fw_sb_id;
4464 rx_data->rx_sb_index_number = params->sb_cq_index;
4465 rx_data->max_tpa_queues = params->max_tpa_queues;
4466 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4467 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4468 rx_data->bd_page_base.lo =
4469 cpu_to_le32(U64_LO(params->dscr_map));
4470 rx_data->bd_page_base.hi =
4471 cpu_to_le32(U64_HI(params->dscr_map));
4472 rx_data->sge_page_base.lo =
4473 cpu_to_le32(U64_LO(params->sge_map));
4474 rx_data->sge_page_base.hi =
4475 cpu_to_le32(U64_HI(params->sge_map));
4476 rx_data->cqe_page_base.lo =
4477 cpu_to_le32(U64_LO(params->rcq_map));
4478 rx_data->cqe_page_base.hi =
4479 cpu_to_le32(U64_HI(params->rcq_map));
4480 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4481
4482 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4483 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4484 rx_data->is_approx_mcast = 1;
4485 }
4486
4487 rx_data->rss_engine_id = params->rss_engine_id;
4488
4489 /* silent vlan removal */
4490 rx_data->silent_vlan_removal_flg =
4491 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4492 rx_data->silent_vlan_value =
4493 cpu_to_le16(params->silent_removal_value);
4494 rx_data->silent_vlan_mask =
4495 cpu_to_le16(params->silent_removal_mask);
4496
4497 }
4498
4499 /* initialize the general, tx and rx parts of a queue object */
4500 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4501 struct bnx2x_queue_state_params *cmd_params,
4502 struct client_init_ramrod_data *data)
4503 {
4504 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4505 &cmd_params->params.setup.gen_params,
4506 &data->general,
4507 &cmd_params->params.setup.flags);
4508
4509 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4510 &cmd_params->params.setup.txq_params,
4511 &data->tx,
4512 &cmd_params->params.setup.flags);
4513
4514 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4515 &cmd_params->params.setup.rxq_params,
4516 &data->rx,
4517 &cmd_params->params.setup.flags);
4518
4519 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4520 &cmd_params->params.setup.pause_params,
4521 &data->rx);
4522 }
4523
4524 /* initialize the general and tx parts of a tx-only queue object */
4525 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4526 struct bnx2x_queue_state_params *cmd_params,
4527 struct tx_queue_init_ramrod_data *data)
4528 {
4529 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4530 &cmd_params->params.tx_only.gen_params,
4531 &data->general,
4532 &cmd_params->params.tx_only.flags);
4533
4534 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4535 &cmd_params->params.tx_only.txq_params,
4536 &data->tx,
4537 &cmd_params->params.tx_only.flags);
4538
4539 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4540 cmd_params->q_obj->cids[0],
4541 data->tx.tx_bd_page_base.lo,
4542 data->tx.tx_bd_page_base.hi);
4543 }
4544
4545 /**
4546 * bnx2x_q_init - init HW/FW queue
4547 *
4548 * @bp: device handle
4549 * @params: queue state parameters; the params.init member is used
4550 *
4551 * HW/FW initial Queue configuration:
4552 * - HC: Rx and Tx
4553 * - CDU context validation
4554 *
4555 */
4556 static inline int bnx2x_q_init(struct bnx2x *bp,
4557 struct bnx2x_queue_state_params *params)
4558 {
4559 struct bnx2x_queue_sp_obj *o = params->q_obj;
4560 struct bnx2x_queue_init_params *init = &params->params.init;
4561 u16 hc_usec;
4562 u8 cos;
4563
4564 /* Tx HC configuration */
4565 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4566 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4567 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4568
4569 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4570 init->tx.sb_cq_index,
4571 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4572 hc_usec);
4573 }
4574
4575 /* Rx HC configuration */
4576 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4577 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4578 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4579
4580 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4581 init->rx.sb_cq_index,
4582 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4583 hc_usec);
4584 }
4585
4586 /* Set CDU context validation values */
4587 for (cos = 0; cos < o->max_cos; cos++) {
4588 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4589 o->cids[cos], cos);
4590 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4591 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4592 }
4593
4594 /* As no ramrod is sent, complete the command immediately */
4595 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4596
4597 mmiowb();
4598 smp_mb();
4599
4600 return 0;
4601 }
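
/*
 * Illustration (editor's sketch): the hc_rate fields above are interrupt
 * rates in events per second; the conversion turns them into a coalescing
 * period in microseconds. The rate below is made up for the example.
 */
#if 0
	u16 hc_usec = 1000000 / 5000;	/* 5000 int/s -> 200 us period */
#endif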
4602
4603 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4604 struct bnx2x_queue_state_params *params)
4605 {
4606 struct bnx2x_queue_sp_obj *o = params->q_obj;
4607 struct client_init_ramrod_data *rdata =
4608 (struct client_init_ramrod_data *)o->rdata;
4609 dma_addr_t data_mapping = o->rdata_mapping;
4610 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4611
4612 /* Clear the ramrod data */
4613 memset(rdata, 0, sizeof(*rdata));
4614
4615 /* Fill the ramrod data */
4616 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4617
4618 /*
4619 * No need for an explicit memory barrier here: the ordering of
4620 * the write to the SPQ element and the update of the SPQ
4621 * producer (which involves a memory read) is guaranteed by the
4622 * full memory barrier inside bnx2x_sp_post(), so nothing more
4623 * is required at this point.
4624 */
4625
4626 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4627 U64_HI(data_mapping),
4628 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4629 }
4630
4631 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4632 struct bnx2x_queue_state_params *params)
4633 {
4634 struct bnx2x_queue_sp_obj *o = params->q_obj;
4635 struct client_init_ramrod_data *rdata =
4636 (struct client_init_ramrod_data *)o->rdata;
4637 dma_addr_t data_mapping = o->rdata_mapping;
4638 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4639
4640 /* Clear the ramrod data */
4641 memset(rdata, 0, sizeof(*rdata));
4642
4643 /* Fill the ramrod data */
4644 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4645 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4646
4647 /*
4648 * No need for an explicit memory barrier here: the ordering of
4649 * the write to the SPQ element and the update of the SPQ
4650 * producer (which involves a memory read) is guaranteed by the
4651 * full memory barrier inside bnx2x_sp_post(), so nothing more
4652 * is required at this point.
4653 */
4654
4655 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4656 U64_HI(data_mapping),
4657 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4658 }
4659
4660 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4661 struct bnx2x_queue_state_params *params)
4662 {
4663 struct bnx2x_queue_sp_obj *o = params->q_obj;
4664 struct tx_queue_init_ramrod_data *rdata =
4665 (struct tx_queue_init_ramrod_data *)o->rdata;
4666 dma_addr_t data_mapping = o->rdata_mapping;
4667 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4668 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4669 &params->params.tx_only;
4670 u8 cid_index = tx_only_params->cid_index;
4671
4672
4673 if (cid_index >= o->max_cos) {
4674 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4675 o->cl_id, cid_index);
4676 return -EINVAL;
4677 }
4678
4679 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4680 tx_only_params->gen_params.cos,
4681 tx_only_params->gen_params.spcl_id);
4682
4683 /* Clear the ramrod data */
4684 memset(rdata, 0, sizeof(*rdata));
4685
4686 /* Fill the ramrod data */
4687 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4688
4689 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4690 o->cids[cid_index], rdata->general.client_id,
4691 rdata->general.sp_client_id, rdata->general.cos);
4692
4693 /*
4694 * No need for an explicit memory barrier here: the ordering of
4695 * the write to the SPQ element and the update of the SPQ
4696 * producer (which involves a memory read) is guaranteed by the
4697 * full memory barrier inside bnx2x_sp_post(), so nothing more
4698 * is required at this point.
4699 */
4700
4701 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4702 U64_HI(data_mapping),
4703 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4704 }
4705
4706 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4707 struct bnx2x_queue_sp_obj *obj,
4708 struct bnx2x_queue_update_params *params,
4709 struct client_update_ramrod_data *data)
4710 {
4711 /* Client ID of the client to update */
4712 data->client_id = obj->cl_id;
4713
4714 /* Function ID of the client to update */
4715 data->func_id = obj->func_id;
4716
4717 /* Default VLAN value */
4718 data->default_vlan = cpu_to_le16(params->def_vlan);
4719
4720 /* Inner VLAN stripping */
4721 data->inner_vlan_removal_enable_flg =
4722 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4723 data->inner_vlan_removal_change_flg =
4724 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4725 &params->update_flags);
4726
4727 /* Outer VLAN stripping */
4728 data->outer_vlan_removal_enable_flg =
4729 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4730 data->outer_vlan_removal_change_flg =
4731 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4732 &params->update_flags);
4733
4734 /* Drop packets that have source MAC that doesn't belong to this
4735 * Queue.
4736 */
4737 data->anti_spoofing_enable_flg =
4738 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4739 data->anti_spoofing_change_flg =
4740 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4741
4742 /* Activate/Deactivate */
4743 data->activate_flg =
4744 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4745 data->activate_change_flg =
4746 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4747
4748 /* Enable default VLAN */
4749 data->default_vlan_enable_flg =
4750 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4751 data->default_vlan_change_flg =
4752 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4753 &params->update_flags);
4754
4755 /* silent vlan removal */
4756 data->silent_vlan_change_flg =
4757 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4758 &params->update_flags);
4759 data->silent_vlan_removal_flg =
4760 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4761 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4762 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4763 }
4764
4765 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4766 struct bnx2x_queue_state_params *params)
4767 {
4768 struct bnx2x_queue_sp_obj *o = params->q_obj;
4769 struct client_update_ramrod_data *rdata =
4770 (struct client_update_ramrod_data *)o->rdata;
4771 dma_addr_t data_mapping = o->rdata_mapping;
4772 struct bnx2x_queue_update_params *update_params =
4773 &params->params.update;
4774 u8 cid_index = update_params->cid_index;
4775
4776 if (cid_index >= o->max_cos) {
4777 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4778 o->cl_id, cid_index);
4779 return -EINVAL;
4780 }
4781
4782
4783 /* Clear the ramrod data */
4784 memset(rdata, 0, sizeof(*rdata));
4785
4786 /* Fill the ramrod data */
4787 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4788
4789 /*
4790 * No need for an explicit memory barrier here: the ordering of
4791 * the write to the SPQ element and the update of the SPQ
4792 * producer (which involves a memory read) is guaranteed by the
4793 * full memory barrier inside bnx2x_sp_post(), so nothing more
4794 * is required at this point.
4795 */
4796
4797 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4798 o->cids[cid_index], U64_HI(data_mapping),
4799 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4800 }
4801
4802 /**
4803 * bnx2x_q_send_deactivate - send DEACTIVATE command
4804 *
4805 * @bp: device handle
4806 * @params: queue state parameters; the params.update member is used
4807 *
4808 * implemented using the UPDATE command.
4809 */
4810 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4811 struct bnx2x_queue_state_params *params)
4812 {
4813 struct bnx2x_queue_update_params *update = &params->params.update;
4814
4815 memset(update, 0, sizeof(*update));
4816
4817 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4818
4819 return bnx2x_q_send_update(bp, params);
4820 }
4821
4822 /**
4823 * bnx2x_q_send_activate - send ACTIVATE command
4824 *
4825 * @bp: device handle
4826 * @params: queue state parameters; the params.update member is used
4827 *
4828 * implemented using the UPDATE command.
4829 */
4830 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4831 struct bnx2x_queue_state_params *params)
4832 {
4833 struct bnx2x_queue_update_params *update = &params->params.update;
4834
4835 memset(update, 0, sizeof(*update));
4836
4837 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4838 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4839
4840 return bnx2x_q_send_update(bp, params);
4841 }
4842
4843 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4844 struct bnx2x_queue_state_params *params)
4845 {
4846 /* TODO: Not implemented yet. */
4847 return -1;
4848 }
4849
4850 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4851 struct bnx2x_queue_state_params *params)
4852 {
4853 struct bnx2x_queue_sp_obj *o = params->q_obj;
4854
4855 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4856 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4857 ETH_CONNECTION_TYPE);
4858 }
4859
4860 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4861 struct bnx2x_queue_state_params *params)
4862 {
4863 struct bnx2x_queue_sp_obj *o = params->q_obj;
4864 u8 cid_idx = params->params.cfc_del.cid_index;
4865
4866 if (cid_idx >= o->max_cos) {
4867 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4868 o->cl_id, cid_idx);
4869 return -EINVAL;
4870 }
4871
4872 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4873 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4874 }
4875
4876 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4877 struct bnx2x_queue_state_params *params)
4878 {
4879 struct bnx2x_queue_sp_obj *o = params->q_obj;
4880 u8 cid_index = params->params.terminate.cid_index;
4881
4882 if (cid_index >= o->max_cos) {
4883 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4884 o->cl_id, cid_index);
4885 return -EINVAL;
4886 }
4887
4888 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4889 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4890 }
4891
4892 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4893 struct bnx2x_queue_state_params *params)
4894 {
4895 struct bnx2x_queue_sp_obj *o = params->q_obj;
4896
4897 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4898 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4899 ETH_CONNECTION_TYPE);
4900 }
4901
4902 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4903 struct bnx2x_queue_state_params *params)
4904 {
4905 switch (params->cmd) {
4906 case BNX2X_Q_CMD_INIT:
4907 return bnx2x_q_init(bp, params);
4908 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4909 return bnx2x_q_send_setup_tx_only(bp, params);
4910 case BNX2X_Q_CMD_DEACTIVATE:
4911 return bnx2x_q_send_deactivate(bp, params);
4912 case BNX2X_Q_CMD_ACTIVATE:
4913 return bnx2x_q_send_activate(bp, params);
4914 case BNX2X_Q_CMD_UPDATE:
4915 return bnx2x_q_send_update(bp, params);
4916 case BNX2X_Q_CMD_UPDATE_TPA:
4917 return bnx2x_q_send_update_tpa(bp, params);
4918 case BNX2X_Q_CMD_HALT:
4919 return bnx2x_q_send_halt(bp, params);
4920 case BNX2X_Q_CMD_CFC_DEL:
4921 return bnx2x_q_send_cfc_del(bp, params);
4922 case BNX2X_Q_CMD_TERMINATE:
4923 return bnx2x_q_send_terminate(bp, params);
4924 case BNX2X_Q_CMD_EMPTY:
4925 return bnx2x_q_send_empty(bp, params);
4926 default:
4927 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4928 return -EINVAL;
4929 }
4930 }
4931
4932 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4933 struct bnx2x_queue_state_params *params)
4934 {
4935 switch (params->cmd) {
4936 case BNX2X_Q_CMD_SETUP:
4937 return bnx2x_q_send_setup_e1x(bp, params);
4938 case BNX2X_Q_CMD_INIT:
4939 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4940 case BNX2X_Q_CMD_DEACTIVATE:
4941 case BNX2X_Q_CMD_ACTIVATE:
4942 case BNX2X_Q_CMD_UPDATE:
4943 case BNX2X_Q_CMD_UPDATE_TPA:
4944 case BNX2X_Q_CMD_HALT:
4945 case BNX2X_Q_CMD_CFC_DEL:
4946 case BNX2X_Q_CMD_TERMINATE:
4947 case BNX2X_Q_CMD_EMPTY:
4948 return bnx2x_queue_send_cmd_cmn(bp, params);
4949 default:
4950 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4951 return -EINVAL;
4952 }
4953 }
4954
4955 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4956 struct bnx2x_queue_state_params *params)
4957 {
4958 switch (params->cmd) {
4959 case BNX2X_Q_CMD_SETUP:
4960 return bnx2x_q_send_setup_e2(bp, params);
4961 case BNX2X_Q_CMD_INIT:
4962 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4963 case BNX2X_Q_CMD_DEACTIVATE:
4964 case BNX2X_Q_CMD_ACTIVATE:
4965 case BNX2X_Q_CMD_UPDATE:
4966 case BNX2X_Q_CMD_UPDATE_TPA:
4967 case BNX2X_Q_CMD_HALT:
4968 case BNX2X_Q_CMD_CFC_DEL:
4969 case BNX2X_Q_CMD_TERMINATE:
4970 case BNX2X_Q_CMD_EMPTY:
4971 return bnx2x_queue_send_cmd_cmn(bp, params);
4972 default:
4973 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4974 return -EINVAL;
4975 }
4976 }
4977
4978 /**
4979 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4980 *
4981 * @bp: device handle
4982 * @o: queue state object
4983 * @params: requested transition parameters
4984 *
4985 * (for a regular, i.e. non-Forwarding, queue)
4986 * It both checks if the requested command is legal in a current
4987 * state and, if it's legal, sets a `next_state' in the object
4988 * that will be used in the completion flow to set the `state'
4989 * of the object.
4990 *
4991 * returns 0 if a requested command is a legal transition,
4992 * -EINVAL otherwise.
4993 */
4994 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4995 struct bnx2x_queue_sp_obj *o,
4996 struct bnx2x_queue_state_params *params)
4997 {
4998 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4999 enum bnx2x_queue_cmd cmd = params->cmd;
5000 struct bnx2x_queue_update_params *update_params =
5001 &params->params.update;
5002 u8 next_tx_only = o->num_tx_only;
5003
5004 /*
5005 * Forget all pending for completion commands if a driver only state
5006 * transition has been requested.
5007 */
5008 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5009 o->pending = 0;
5010 o->next_state = BNX2X_Q_STATE_MAX;
5011 }
5012
5013 /*
5014 * Don't allow a next state transition if we are in the middle of
5015 * the previous one.
5016 */
5017 if (o->pending)
5018 return -EBUSY;
5019
5020 switch (state) {
5021 case BNX2X_Q_STATE_RESET:
5022 if (cmd == BNX2X_Q_CMD_INIT)
5023 next_state = BNX2X_Q_STATE_INITIALIZED;
5024
5025 break;
5026 case BNX2X_Q_STATE_INITIALIZED:
5027 if (cmd == BNX2X_Q_CMD_SETUP) {
5028 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5029 &params->params.setup.flags))
5030 next_state = BNX2X_Q_STATE_ACTIVE;
5031 else
5032 next_state = BNX2X_Q_STATE_INACTIVE;
5033 }
5034
5035 break;
5036 case BNX2X_Q_STATE_ACTIVE:
5037 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5038 next_state = BNX2X_Q_STATE_INACTIVE;
5039
5040 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5041 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5042 next_state = BNX2X_Q_STATE_ACTIVE;
5043
5044 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5045 next_state = BNX2X_Q_STATE_MULTI_COS;
5046 next_tx_only = 1;
5047 }
5048
5049 else if (cmd == BNX2X_Q_CMD_HALT)
5050 next_state = BNX2X_Q_STATE_STOPPED;
5051
5052 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5053 /* If "active" state change is requested, update the
5054 * state accordingly.
5055 */
5056 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5057 &update_params->update_flags) &&
5058 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5059 &update_params->update_flags))
5060 next_state = BNX2X_Q_STATE_INACTIVE;
5061 else
5062 next_state = BNX2X_Q_STATE_ACTIVE;
5063 }
5064
5065 break;
5066 case BNX2X_Q_STATE_MULTI_COS:
5067 if (cmd == BNX2X_Q_CMD_TERMINATE)
5068 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5069
5070 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5071 next_state = BNX2X_Q_STATE_MULTI_COS;
5072 next_tx_only = o->num_tx_only + 1;
5073 }
5074
5075 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5076 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5077 next_state = BNX2X_Q_STATE_MULTI_COS;
5078
5079 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5080 /* If "active" state change is requested, update the
5081 * state accordingly.
5082 */
5083 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5084 &update_params->update_flags) &&
5085 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5086 &update_params->update_flags))
5087 next_state = BNX2X_Q_STATE_INACTIVE;
5088 else
5089 next_state = BNX2X_Q_STATE_MULTI_COS;
5090 }
5091
5092 break;
5093 case BNX2X_Q_STATE_MCOS_TERMINATED:
5094 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5095 next_tx_only = o->num_tx_only - 1;
5096 if (next_tx_only == 0)
5097 next_state = BNX2X_Q_STATE_ACTIVE;
5098 else
5099 next_state = BNX2X_Q_STATE_MULTI_COS;
5100 }
5101
5102 break;
5103 case BNX2X_Q_STATE_INACTIVE:
5104 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5105 next_state = BNX2X_Q_STATE_ACTIVE;
5106
5107 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5108 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5109 next_state = BNX2X_Q_STATE_INACTIVE;
5110
5111 else if (cmd == BNX2X_Q_CMD_HALT)
5112 next_state = BNX2X_Q_STATE_STOPPED;
5113
5114 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5115 /* If "active" state change is requested, update the
5116 * state accordingly.
5117 */
5118 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5119 &update_params->update_flags) &&
5120 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5121 &update_params->update_flags)) {
5122 if (o->num_tx_only == 0)
5123 next_state = BNX2X_Q_STATE_ACTIVE;
5124 else /* tx only queues exist for this queue */
5125 next_state = BNX2X_Q_STATE_MULTI_COS;
5126 } else
5127 next_state = BNX2X_Q_STATE_INACTIVE;
5128 }
5129
5130 break;
5131 case BNX2X_Q_STATE_STOPPED:
5132 if (cmd == BNX2X_Q_CMD_TERMINATE)
5133 next_state = BNX2X_Q_STATE_TERMINATED;
5134
5135 break;
5136 case BNX2X_Q_STATE_TERMINATED:
5137 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5138 next_state = BNX2X_Q_STATE_RESET;
5139
5140 break;
5141 default:
5142 BNX2X_ERR("Illegal state: %d\n", state);
5143 }
5144
5145 /* Transition is assured */
5146 if (next_state != BNX2X_Q_STATE_MAX) {
5147 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5148 state, cmd, next_state);
5149 o->next_state = next_state;
5150 o->next_tx_only = next_tx_only;
5151 return 0;
5152 }
5153
5154 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5155
5156 return -EINVAL;
5157 }
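
/*
 * Summary of the queue state machine implemented above (transitions are
 * labeled with the BNX2X_Q_CMD_* command that triggers them):
 *
 *   RESET           --INIT---------->  INITIALIZED
 *   INITIALIZED     --SETUP--------->  ACTIVE or INACTIVE (BNX2X_Q_FLG_ACTIVE)
 *   ACTIVE          --DEACTIVATE---->  INACTIVE
 *   ACTIVE          --SETUP_TX_ONLY->  MULTI_COS (first Tx-only queue)
 *   MULTI_COS       --TERMINATE----->  MCOS_TERMINATED
 *   MCOS_TERMINATED --CFC_DEL------->  ACTIVE (last Tx-only queue) or MULTI_COS
 *   ACTIVE/INACTIVE --HALT---------->  STOPPED
 *   STOPPED         --TERMINATE----->  TERMINATED
 *   TERMINATED      --CFC_DEL------->  RESET
 *
 * EMPTY, UPDATE_TPA and a non-activating UPDATE keep the current state;
 * an UPDATE carrying BNX2X_Q_UPDATE_ACTIVATE_CHNG moves between
 * ACTIVE/MULTI_COS and INACTIVE according to BNX2X_Q_UPDATE_ACTIVATE.
 */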
5158
5159 void bnx2x_init_queue_obj(struct bnx2x *bp,
5160 struct bnx2x_queue_sp_obj *obj,
5161 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5162 void *rdata,
5163 dma_addr_t rdata_mapping, unsigned long type)
5164 {
5165 memset(obj, 0, sizeof(*obj));
5166
5167 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5168 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5169
5170 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5171 obj->max_cos = cid_cnt;
5172 obj->cl_id = cl_id;
5173 obj->func_id = func_id;
5174 obj->rdata = rdata;
5175 obj->rdata_mapping = rdata_mapping;
5176 obj->type = type;
5177 obj->next_state = BNX2X_Q_STATE_MAX;
5178
5179 if (CHIP_IS_E1x(bp))
5180 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5181 else
5182 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5183
5184 obj->check_transition = bnx2x_queue_chk_transition;
5185
5186 obj->complete_cmd = bnx2x_queue_comp_cmd;
5187 obj->wait_comp = bnx2x_queue_wait_comp;
5188 obj->set_pending = bnx2x_queue_set_pending;
5189 }
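
/*
 * Illustrative sketch only: initializing a queue SP object for a
 * single-CoS L2 queue. The fp->cid/fp->cl_id fields, the
 * bnx2x_sp()/bnx2x_sp_mapping() helpers and the q_rdata name are
 * assumptions following their use elsewhere in the driver; the exact
 * parameters depend on the caller.
 *
 *	u32 cids[BNX2X_MULTI_TX_COS] = { fp->cid };
 *
 *	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, 1,
 *			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 *			     bnx2x_sp_mapping(bp, q_rdata),
 *			     BNX2X_OBJ_TYPE_RX_TX);
 */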
5190
5191 /********************** Function state object *********************************/
5192 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5193 struct bnx2x_func_sp_obj *o)
5194 {
5195 	/* In the middle of a transaction - return INVALID state */
5196 if (o->pending)
5197 return BNX2X_F_STATE_MAX;
5198
5199 	/*
5200 	 * Ensure the order of reading of o->pending and o->state:
5201 	 * o->pending should be read first.
5202 	 */
5203 rmb();
5204
5205 return o->state;
5206 }
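
/*
 * Note: the rmb() above pairs with the wmb() in
 * bnx2x_func_state_change_comp() below, which updates o->state and
 * o->next_state before clearing the corresponding o->pending bit.
 */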
5207
5208 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5209 struct bnx2x_func_sp_obj *o,
5210 enum bnx2x_func_cmd cmd)
5211 {
5212 return bnx2x_state_wait(bp, cmd, &o->pending);
5213 }
5214
5215 /**
5216 * bnx2x_func_state_change_comp - complete the state machine transition
5217 *
5218 * @bp: device handle
5219  * @o:		function state object
5220  * @cmd:	command that has been completed
5221 *
5222 * Called on state change transition. Completes the state
5223 * machine transition only - no HW interaction.
5224 */
5225 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5226 struct bnx2x_func_sp_obj *o,
5227 enum bnx2x_func_cmd cmd)
5228 {
5229 unsigned long cur_pending = o->pending;
5230
5231 if (!test_and_clear_bit(cmd, &cur_pending)) {
5232 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5233 cmd, BP_FUNC(bp), o->state,
5234 cur_pending, o->next_state);
5235 return -EINVAL;
5236 }
5237
5238 DP(BNX2X_MSG_SP,
5239 "Completing command %d for func %d, setting state to %d\n",
5240 cmd, BP_FUNC(bp), o->next_state);
5241
5242 o->state = o->next_state;
5243 o->next_state = BNX2X_F_STATE_MAX;
5244
5245 /* It's important that o->state and o->next_state are
5246 * updated before o->pending.
5247 */
5248 wmb();
5249
5250 clear_bit(cmd, &o->pending);
5251 smp_mb__after_clear_bit();
5252
5253 return 0;
5254 }
5255
5256 /**
5257 * bnx2x_func_comp_cmd - complete the state change command
5258 *
5259 * @bp: device handle
5260  * @o:		function state object
5261  * @cmd:	command the arrived completion refers to
5262 *
5263 * Checks that the arrived completion is expected.
5264 */
5265 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5266 struct bnx2x_func_sp_obj *o,
5267 enum bnx2x_func_cmd cmd)
5268 {
5269 /* Complete the state machine part first, check if it's a
5270 * legal completion.
5271 */
5272 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5273 return rc;
5274 }
5275
5276 /**
5277 * bnx2x_func_chk_transition - perform function state machine transition
5278 *
5279 * @bp: device handle
5280  * @o:		function state object
5281  * @params:	state transition request parameters
5282 *
5283 * It both checks if the requested command is legal in a current
5284 * state and, if it's legal, sets a `next_state' in the object
5285 * that will be used in the completion flow to set the `state'
5286 * of the object.
5287 *
5288 * returns 0 if a requested command is a legal transition,
5289 * -EINVAL otherwise.
5290 */
5291 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5292 struct bnx2x_func_sp_obj *o,
5293 struct bnx2x_func_state_params *params)
5294 {
5295 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5296 enum bnx2x_func_cmd cmd = params->cmd;
5297
5298 /*
5299 * Forget all pending for completion commands if a driver only state
5300 * transition has been requested.
5301 */
5302 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5303 o->pending = 0;
5304 o->next_state = BNX2X_F_STATE_MAX;
5305 }
5306
5307 /*
5308 * Don't allow a next state transition if we are in the middle of
5309 * the previous one.
5310 */
5311 if (o->pending)
5312 return -EBUSY;
5313
5314 switch (state) {
5315 case BNX2X_F_STATE_RESET:
5316 if (cmd == BNX2X_F_CMD_HW_INIT)
5317 next_state = BNX2X_F_STATE_INITIALIZED;
5318
5319 break;
5320 case BNX2X_F_STATE_INITIALIZED:
5321 if (cmd == BNX2X_F_CMD_START)
5322 next_state = BNX2X_F_STATE_STARTED;
5323
5324 else if (cmd == BNX2X_F_CMD_HW_RESET)
5325 next_state = BNX2X_F_STATE_RESET;
5326
5327 break;
5328 case BNX2X_F_STATE_STARTED:
5329 if (cmd == BNX2X_F_CMD_STOP)
5330 next_state = BNX2X_F_STATE_INITIALIZED;
5331 else if (cmd == BNX2X_F_CMD_TX_STOP)
5332 next_state = BNX2X_F_STATE_TX_STOPPED;
5333
5334 break;
5335 case BNX2X_F_STATE_TX_STOPPED:
5336 if (cmd == BNX2X_F_CMD_TX_START)
5337 next_state = BNX2X_F_STATE_STARTED;
5338
5339 break;
5340 default:
5341 BNX2X_ERR("Unknown state: %d\n", state);
5342 }
5343
5344 /* Transition is assured */
5345 if (next_state != BNX2X_F_STATE_MAX) {
5346 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5347 state, cmd, next_state);
5348 o->next_state = next_state;
5349 return 0;
5350 }
5351
5352 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5353 state, cmd);
5354
5355 return -EINVAL;
5356 }
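
/*
 * Summary of the function state machine implemented above (transitions
 * are labeled with the BNX2X_F_CMD_* command that triggers them):
 *
 *   RESET        --HW_INIT--->  INITIALIZED
 *   INITIALIZED  --START----->  STARTED
 *   INITIALIZED  --HW_RESET->   RESET
 *   STARTED      --STOP------>  INITIALIZED
 *   STARTED      --TX_STOP--->  TX_STOPPED
 *   TX_STOPPED   --TX_START->   STARTED
 */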
5357
5358 /**
5359 * bnx2x_func_init_func - performs HW init at function stage
5360 *
5361 * @bp: device handle
5362  * @drv:	function SP driver operations
5363 *
5364 * Init HW when the current phase is
5365 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5366 * HW blocks.
5367 */
5368 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5369 const struct bnx2x_func_sp_drv_ops *drv)
5370 {
5371 return drv->init_hw_func(bp);
5372 }
5373
5374 /**
5375 * bnx2x_func_init_port - performs HW init at port stage
5376 *
5377 * @bp: device handle
5378  * @drv:	function SP driver operations
5379 *
5380 * Init HW when the current phase is
5381 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5382 * FUNCTION-only HW blocks.
5383 *
5384 */
5385 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5386 const struct bnx2x_func_sp_drv_ops *drv)
5387 {
5388 int rc = drv->init_hw_port(bp);
5389 if (rc)
5390 return rc;
5391
5392 return bnx2x_func_init_func(bp, drv);
5393 }
5394
5395 /**
5396 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5397 *
5398 * @bp: device handle
5399  * @drv:	function SP driver operations
5400 *
5401 * Init HW when the current phase is
5402 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5403 * PORT-only and FUNCTION-only HW blocks.
5404 */
5405 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5406 const struct bnx2x_func_sp_drv_ops *drv)
5407 {
5408 int rc = drv->init_hw_cmn_chip(bp);
5409 if (rc)
5410 return rc;
5411
5412 return bnx2x_func_init_port(bp, drv);
5413 }
5414
5415 /**
5416 * bnx2x_func_init_cmn - performs HW init at common stage
5417 *
5418 * @bp: device handle
5419  * @drv:	function SP driver operations
5420  *
5421  * Init HW when the current phase is
5422  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5423 * PORT-only and FUNCTION-only HW blocks.
5424 */
5425 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5426 const struct bnx2x_func_sp_drv_ops *drv)
5427 {
5428 int rc = drv->init_hw_cmn(bp);
5429 if (rc)
5430 return rc;
5431
5432 return bnx2x_func_init_port(bp, drv);
5433 }
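
/*
 * Note the nesting of the init helpers above: each load phase includes
 * the lighter phases below it, so init_cmn_chip/init_cmn fall through to
 * init_port, which falls through to init_func. The MCP-provided
 * load_code selects the entry point (see bnx2x_func_hw_init() below).
 */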
5434
5435 static int bnx2x_func_hw_init(struct bnx2x *bp,
5436 struct bnx2x_func_state_params *params)
5437 {
5438 u32 load_code = params->params.hw_init.load_phase;
5439 struct bnx2x_func_sp_obj *o = params->f_obj;
5440 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5441 int rc = 0;
5442
5443 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5444 BP_ABS_FUNC(bp), load_code);
5445
5446 /* Prepare buffers for unzipping the FW */
5447 rc = drv->gunzip_init(bp);
5448 if (rc)
5449 return rc;
5450
5451 /* Prepare FW */
5452 rc = drv->init_fw(bp);
5453 if (rc) {
5454 BNX2X_ERR("Error loading firmware\n");
5455 goto init_err;
5456 }
5457
5458 	/* Handle the beginning of COMMON_XXX phases separately... */
5459 switch (load_code) {
5460 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5461 rc = bnx2x_func_init_cmn_chip(bp, drv);
5462 if (rc)
5463 goto init_err;
5464
5465 break;
5466 case FW_MSG_CODE_DRV_LOAD_COMMON:
5467 rc = bnx2x_func_init_cmn(bp, drv);
5468 if (rc)
5469 goto init_err;
5470
5471 break;
5472 case FW_MSG_CODE_DRV_LOAD_PORT:
5473 rc = bnx2x_func_init_port(bp, drv);
5474 if (rc)
5475 goto init_err;
5476
5477 break;
5478 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5479 rc = bnx2x_func_init_func(bp, drv);
5480 if (rc)
5481 goto init_err;
5482
5483 break;
5484 default:
5485 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5486 rc = -EINVAL;
5487 }
5488
5489 init_err:
5490 drv->gunzip_end(bp);
5491
5492 	/* In case of success, complete the command immediately: no ramrods
5493 * have been sent.
5494 */
5495 if (!rc)
5496 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5497
5498 return rc;
5499 }
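
/*
 * Illustrative sketch (assumptions: bp->func_obj is the function SP
 * object and load_code was returned by the MCP load request): HW_INIT
 * is requested through the generic state-change entry point rather than
 * by calling bnx2x_func_hw_init() directly:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = load_code;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */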
5500
5501 /**
5502 * bnx2x_func_reset_func - reset HW at function stage
5503 *
5504 * @bp: device handle
5505  * @drv:	function SP driver operations
5506 *
5507 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5508 * FUNCTION-only HW blocks.
5509 */
5510 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5511 const struct bnx2x_func_sp_drv_ops *drv)
5512 {
5513 drv->reset_hw_func(bp);
5514 }
5515
5516 /**
5517  * bnx2x_func_reset_port - reset HW at port stage
5518 *
5519 * @bp: device handle
5520  * @drv:	function SP driver operations
5521 *
5522 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5523 * FUNCTION-only and PORT-only HW blocks.
5524 *
5525 * !!!IMPORTANT!!!
5526 *
5527  * It's important to call reset_port before reset_func() as the last thing
5528  * reset_func does is pf_disable(), thus disabling PGLUE_B, which
5529  * makes any further DMAE transactions impossible.
5530 */
5531 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5532 const struct bnx2x_func_sp_drv_ops *drv)
5533 {
5534 drv->reset_hw_port(bp);
5535 bnx2x_func_reset_func(bp, drv);
5536 }
5537
5538 /**
5539  * bnx2x_func_reset_cmn - reset HW at common stage
5540 *
5541 * @bp: device handle
5542  * @drv:	function SP driver operations
5543 *
5544 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5545 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5546 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5547 */
5548 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5549 const struct bnx2x_func_sp_drv_ops *drv)
5550 {
5551 bnx2x_func_reset_port(bp, drv);
5552 drv->reset_hw_cmn(bp);
5553 }
5554
5555
5556 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5557 struct bnx2x_func_state_params *params)
5558 {
5559 u32 reset_phase = params->params.hw_reset.reset_phase;
5560 struct bnx2x_func_sp_obj *o = params->f_obj;
5561 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5562
5563 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5564 reset_phase);
5565
5566 switch (reset_phase) {
5567 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5568 bnx2x_func_reset_cmn(bp, drv);
5569 break;
5570 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5571 bnx2x_func_reset_port(bp, drv);
5572 break;
5573 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5574 bnx2x_func_reset_func(bp, drv);
5575 break;
5576 default:
5577 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5578 reset_phase);
5579 break;
5580 }
5581
5582 	/* Complete the command immediately: no ramrods have been sent. */
5583 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5584
5585 return 0;
5586 }
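
/*
 * Illustrative sketch (assuming reset_phase comes from the MCP unload
 * response): HW_RESET is likewise driven through
 * bnx2x_func_state_change():
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_HW_RESET;
 *	func_params.params.hw_reset.reset_phase = reset_phase;
 *	bnx2x_func_state_change(bp, &func_params);
 */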
5587
5588 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5589 struct bnx2x_func_state_params *params)
5590 {
5591 struct bnx2x_func_sp_obj *o = params->f_obj;
5592 struct function_start_data *rdata =
5593 (struct function_start_data *)o->rdata;
5594 dma_addr_t data_mapping = o->rdata_mapping;
5595 	struct bnx2x_func_start_params *start_params = &params->params.start;
5596
5597 memset(rdata, 0, sizeof(*rdata));
5598
5599 /* Fill the ramrod data with provided parameters */
5600 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5601 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5602 rdata->path_id = BP_PATH(bp);
5603 rdata->network_cos_mode = start_params->network_cos_mode;
5604
5605 	/*
5606 	 * No need for an explicit memory barrier here as long as we
5607 	 * ensure the ordering of writing to the SPQ element
5608 	 * and updating of the SPQ producer, which involves a memory
5609 	 * read: the full memory barrier required for that is already
5610 	 * provided inside bnx2x_sp_post().
5611 	 */
5612
5613 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5614 U64_HI(data_mapping),
5615 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5616 }
5617
5618 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5619 struct bnx2x_func_state_params *params)
5620 {
5621 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5622 NONE_CONNECTION_TYPE);
5623 }
5624
5625 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5626 struct bnx2x_func_state_params *params)
5627 {
5628 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5629 NONE_CONNECTION_TYPE);
5630 }
5631 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5632 struct bnx2x_func_state_params *params)
5633 {
5634 struct bnx2x_func_sp_obj *o = params->f_obj;
5635 struct flow_control_configuration *rdata =
5636 (struct flow_control_configuration *)o->rdata;
5637 dma_addr_t data_mapping = o->rdata_mapping;
5638 struct bnx2x_func_tx_start_params *tx_start_params =
5639 		&params->params.tx_start;
5640 int i;
5641
5642 memset(rdata, 0, sizeof(*rdata));
5643
5644 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5645 rdata->dcb_version = tx_start_params->dcb_version;
5646 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5647
5648 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5649 rdata->traffic_type_to_priority_cos[i] =
5650 tx_start_params->traffic_type_to_priority_cos[i];
5651
5652 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5653 U64_HI(data_mapping),
5654 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5655 }
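
/*
 * Illustrative sketch (loosely following the DCB flow; dcbx_version is a
 * placeholder value): resuming traffic with DCB enabled.
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_tx_start_params *tx_params =
 *		&func_params.params.tx_start;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_START;
 *	tx_params->dcb_enabled = 1;
 *	tx_params->dcb_version = dcbx_version;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */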
5656
5657 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5658 struct bnx2x_func_state_params *params)
5659 {
5660 switch (params->cmd) {
5661 case BNX2X_F_CMD_HW_INIT:
5662 return bnx2x_func_hw_init(bp, params);
5663 case BNX2X_F_CMD_START:
5664 return bnx2x_func_send_start(bp, params);
5665 case BNX2X_F_CMD_STOP:
5666 return bnx2x_func_send_stop(bp, params);
5667 case BNX2X_F_CMD_HW_RESET:
5668 return bnx2x_func_hw_reset(bp, params);
5669 case BNX2X_F_CMD_TX_STOP:
5670 return bnx2x_func_send_tx_stop(bp, params);
5671 case BNX2X_F_CMD_TX_START:
5672 return bnx2x_func_send_tx_start(bp, params);
5673 default:
5674 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5675 return -EINVAL;
5676 }
5677 }
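
/*
 * Note: of the commands dispatched above, HW_INIT and HW_RESET are
 * handled entirely in the driver and call o->complete_cmd() themselves,
 * while START/STOP/TX_STOP/TX_START post a ramrod and complete
 * asynchronously through the object's complete_cmd() callback
 * (bnx2x_func_comp_cmd()).
 */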
5678
5679 void bnx2x_init_func_obj(struct bnx2x *bp,
5680 struct bnx2x_func_sp_obj *obj,
5681 void *rdata, dma_addr_t rdata_mapping,
5682 struct bnx2x_func_sp_drv_ops *drv_iface)
5683 {
5684 memset(obj, 0, sizeof(*obj));
5685
5686 mutex_init(&obj->one_pending_mutex);
5687
5688 obj->rdata = rdata;
5689 obj->rdata_mapping = rdata_mapping;
5690
5691 obj->send_cmd = bnx2x_func_send_cmd;
5692 obj->check_transition = bnx2x_func_chk_transition;
5693 obj->complete_cmd = bnx2x_func_comp_cmd;
5694 obj->wait_comp = bnx2x_func_wait_comp;
5695
5696 obj->drv = drv_iface;
5697 }
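
/*
 * Illustrative sketch: a single function SP object is typically set up
 * once during driver init. The bnx2x_func_sp_drv operations table and
 * the func_rdata name are assumptions here; the actual instances live
 * outside this file.
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    &bnx2x_func_sp_drv);
 */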
5698
5699 /**
5700 * bnx2x_func_state_change - perform Function state change transition
5701 *
5702 * @bp: device handle
5703 * @params: parameters to perform the transaction
5704 *
5705  * returns 0 in case of a successfully completed transition,
5706  * negative error code in case of failure, positive
5707  * (EBUSY) value if there is a completion that is
5708 * still pending (possible only if RAMROD_COMP_WAIT is
5709 * not set in params->ramrod_flags for asynchronous
5710 * commands).
5711 */
5712 int bnx2x_func_state_change(struct bnx2x *bp,
5713 struct bnx2x_func_state_params *params)
5714 {
5715 struct bnx2x_func_sp_obj *o = params->f_obj;
5716 int rc;
5717 enum bnx2x_func_cmd cmd = params->cmd;
5718 unsigned long *pending = &o->pending;
5719
5720 mutex_lock(&o->one_pending_mutex);
5721
5722 /* Check that the requested transition is legal */
5723 if (o->check_transition(bp, o, params)) {
5724 mutex_unlock(&o->one_pending_mutex);
5725 return -EINVAL;
5726 }
5727
5728 /* Set "pending" bit */
5729 set_bit(cmd, pending);
5730
5731 /* Don't send a command if only driver cleanup was requested */
5732 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5733 bnx2x_func_state_change_comp(bp, o, cmd);
5734 mutex_unlock(&o->one_pending_mutex);
5735 } else {
5736 /* Send a ramrod */
5737 rc = o->send_cmd(bp, params);
5738
5739 mutex_unlock(&o->one_pending_mutex);
5740
5741 if (rc) {
5742 o->next_state = BNX2X_F_STATE_MAX;
5743 clear_bit(cmd, pending);
5744 smp_mb__after_clear_bit();
5745 return rc;
5746 }
5747
5748 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5749 rc = o->wait_comp(bp, o, cmd);
5750 if (rc)
5751 return rc;
5752
5753 return 0;
5754 }
5755 }
5756
5757 return !!test_bit(cmd, pending);
5758 }
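
/*
 * Illustrative sketch of a synchronous caller (assumes bp->func_obj and
 * the usual mf_mode/mf_ov fields): issuing START and blocking until the
 * ramrod completion arrives.
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_start_params *start_params =
 *		&func_params.params.start;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	start_params->mf_mode = bp->mf_mode;
 *	start_params->sd_vlan_tag = bp->mf_ov;
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */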
5759