1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include "bna.h"
19 #include "bfa_sm.h"
20 #include "bfa_wc.h"
21 
22 static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
23 
24 static void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
26 			int status)
27 {
28 	int i;
29 	u8 prio_map;
30 
31 	port->llport.link_status = BNA_LINK_UP;
32 	if (aen->cee_linkup)
33 		port->llport.link_status = BNA_CEE_UP;
34 
	/* Compute the priority: lowest priority number set in prio_map */
36 	prio_map = aen->prio_map;
37 	if (prio_map) {
38 		for (i = 0; i < 8; i++) {
39 			if ((prio_map >> i) & 0x1)
40 				break;
41 		}
42 		port->priority = i;
43 	} else
44 		port->priority = 0;
45 
46 	/* Dispatch events */
47 	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
48 	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
49 	port->link_cbfn(port->bna->bnad, port->llport.link_status);
50 }
51 
52 static void
bna_port_cb_link_down(struct bna_port *port, int status)
54 {
55 	port->llport.link_status = BNA_LINK_DOWN;
56 
57 	/* Dispatch events */
58 	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
59 	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
60 }
61 
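/*
 * A regular port can come up only when it is administratively up, its
 * Rx path has started and the physical port is enabled; a loopback
 * port instead requires the physical port to be disabled.
 */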
62 static inline int
llport_can_be_up(struct bna_llport *llport)
64 {
65 	int ready = 0;
66 	if (llport->type == BNA_PORT_T_REGULAR)
67 		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
68 			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
69 			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
70 	else
71 		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
72 			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
73 			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
74 	return ready;
75 }
76 
77 #define llport_is_up llport_can_be_up
78 
79 enum bna_llport_event {
80 	LLPORT_E_START			= 1,
81 	LLPORT_E_STOP			= 2,
82 	LLPORT_E_FAIL			= 3,
83 	LLPORT_E_UP			= 4,
84 	LLPORT_E_DOWN			= 5,
85 	LLPORT_E_FWRESP_UP_OK		= 6,
86 	LLPORT_E_FWRESP_UP_FAIL		= 7,
87 	LLPORT_E_FWRESP_DOWN		= 8
88 };
89 
90 static void
bna_llport_cb_port_enabled(struct bna_llport *llport)
92 {
93 	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
94 
95 	if (llport_can_be_up(llport))
96 		bfa_fsm_send_event(llport, LLPORT_E_UP);
97 }
98 
99 static void
bna_llport_cb_port_disabled(struct bna_llport *llport)
101 {
102 	int llport_up = llport_is_up(llport);
103 
104 	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
105 
106 	if (llport_up)
107 		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
108 }
109 
110 /**
111  * MBOX
112  */
113 static int
bna_is_aen(u8 msg_id)
115 {
116 	switch (msg_id) {
117 	case BFI_LL_I2H_LINK_DOWN_AEN:
118 	case BFI_LL_I2H_LINK_UP_AEN:
119 	case BFI_LL_I2H_PORT_ENABLE_AEN:
120 	case BFI_LL_I2H_PORT_DISABLE_AEN:
121 		return 1;
122 
123 	default:
124 		return 0;
125 	}
126 }
127 
128 static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
130 {
131 	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
132 
133 	switch (aen->mh.msg_id) {
134 	case BFI_LL_I2H_LINK_UP_AEN:
135 		bna_port_cb_link_up(&bna->port, aen, aen->reason);
136 		break;
137 	case BFI_LL_I2H_LINK_DOWN_AEN:
138 		bna_port_cb_link_down(&bna->port, aen->reason);
139 		break;
140 	case BFI_LL_I2H_PORT_ENABLE_AEN:
141 		bna_llport_cb_port_enabled(&bna->port.llport);
142 		break;
143 	case BFI_LL_I2H_PORT_DISABLE_AEN:
144 		bna_llport_cb_port_disabled(&bna->port.llport);
145 		break;
146 	default:
147 		break;
148 	}
149 }
150 
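/*
 * Mailbox interrupt handler for the LL message class.  AENs are
 * dispatched directly to the port/llport callbacks; command responses
 * are matched against the request at the head of posted_q (message id
 * and i2h token) before the completion callback is invoked.
 */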
151 static void
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
153 {
154 	struct bna *bna = (struct bna *)(llarg);
155 	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
156 	struct bfi_mhdr *cmd_h, *rsp_h;
157 	struct bna_mbox_qe *mb_qe = NULL;
158 	int to_post = 0;
159 	u8 aen = 0;
160 	char message[BNA_MESSAGE_SIZE];
161 
162 	aen = bna_is_aen(mb_rsp->mh.msg_id);
163 
164 	if (!aen) {
165 		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
166 		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
167 		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
168 
169 		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
170 		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
171 			/* Remove the request from posted_q, update state  */
172 			list_del(&mb_qe->qe);
173 			bna->mbox_mod.msg_pending--;
174 			if (list_empty(&bna->mbox_mod.posted_q))
175 				bna->mbox_mod.state = BNA_MBOX_FREE;
176 			else
177 				to_post = 1;
178 
179 			/* Dispatch the cbfn */
180 			if (mb_qe->cbfn)
181 				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
182 
183 			/* Post the next entry, if needed */
184 			if (to_post) {
185 				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
186 				bfa_nw_ioc_mbox_queue(&bna->device.ioc,
187 							&mb_qe->cmd);
188 			}
189 		} else {
190 			snprintf(message, BNA_MESSAGE_SIZE,
191 				       "No matching rsp for [%d:%d:%d]\n",
192 				       mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
193 				       mb_rsp->mh.mtag.i2htok);
			pr_info("%s", message);
195 		}
196 
197 	} else
198 		bna_mbox_aen_callback(bna, msg);
199 }
200 
201 static void
bna_err_handler(struct bna *bna, u32 intr_status)
203 {
204 	u32 init_halt;
205 
206 	if (intr_status & __HALT_STATUS_BITS) {
207 		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
208 		init_halt &= ~__FW_INIT_HALT_P;
209 		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
210 	}
211 
212 	bfa_nw_ioc_error_isr(&bna->device.ioc);
213 }
214 
215 void
bna_mbox_handler(struct bna *bna, u32 intr_status)
217 {
218 	if (BNA_IS_ERR_INTR(intr_status)) {
219 		bna_err_handler(bna, intr_status);
220 		return;
221 	}
222 	if (BNA_IS_MBOX_INTR(intr_status))
223 		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
224 }
225 
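/*
 * Post a mailbox command to the firmware.  Only one command is kept
 * outstanding at a time; while the mailbox is busy the request simply
 * waits on posted_q and is posted from bna_ll_isr() once the response
 * to the previous command has been consumed.
 */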
226 void
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
228 {
229 	struct bfi_mhdr *mh;
230 
231 	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
232 
233 	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
234 	bna->mbox_mod.msg_ctr++;
235 	bna->mbox_mod.msg_pending++;
236 	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
237 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
238 		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
239 		bna->mbox_mod.state = BNA_MBOX_POSTED;
240 	} else {
241 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
242 	}
243 }
244 
245 static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
247 {
248 	struct bna_mbox_qe *mb_qe = NULL;
249 	struct bfi_mhdr *cmd_h;
250 	struct list_head			*mb_q;
251 	void 			(*cbfn)(void *arg, int status);
252 	void 			*cbarg;
253 
254 	mb_q = &bna->mbox_mod.posted_q;
255 
256 	while (!list_empty(mb_q)) {
257 		bfa_q_deq(mb_q, &mb_qe);
258 		cbfn = mb_qe->cbfn;
259 		cbarg = mb_qe->cbarg;
260 		bfa_q_qe_init(mb_qe);
261 		bna->mbox_mod.msg_pending--;
262 
263 		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
264 		if (cbfn)
265 			cbfn(cbarg, BNA_CB_NOT_EXEC);
266 	}
267 
268 	bna->mbox_mod.state = BNA_MBOX_FREE;
269 }
270 
271 static void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
273 {
274 }
275 
276 static void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
278 {
279 	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
280 }
281 
282 static void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
284 {
285 	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
286 	mbox_mod->state = BNA_MBOX_FREE;
287 	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
288 	INIT_LIST_HEAD(&mbox_mod->posted_q);
289 	mbox_mod->bna = bna;
290 }
291 
292 static void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
294 {
295 	mbox_mod->bna = NULL;
296 }
297 
298 /**
299  * LLPORT
300  */
301 #define call_llport_stop_cbfn(llport, status)\
302 do {\
303 	if ((llport)->stop_cbfn)\
304 		(llport)->stop_cbfn(&(llport)->bna->port, status);\
305 	(llport)->stop_cbfn = NULL;\
306 } while (0)
307 
308 static void bna_fw_llport_up(struct bna_llport *llport);
309 static void bna_fw_cb_llport_up(void *arg, int status);
310 static void bna_fw_llport_down(struct bna_llport *llport);
311 static void bna_fw_cb_llport_down(void *arg, int status);
312 static void bna_llport_start(struct bna_llport *llport);
313 static void bna_llport_stop(struct bna_llport *llport);
314 static void bna_llport_fail(struct bna_llport *llport);
315 
316 enum bna_llport_state {
317 	BNA_LLPORT_STOPPED		= 1,
318 	BNA_LLPORT_DOWN			= 2,
319 	BNA_LLPORT_UP_RESP_WAIT		= 3,
320 	BNA_LLPORT_DOWN_RESP_WAIT	= 4,
321 	BNA_LLPORT_UP			= 5,
322 	BNA_LLPORT_LAST_RESP_WAIT 	= 6
323 };
324 
325 bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
326 			enum bna_llport_event);
327 bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
328 			enum bna_llport_event);
329 bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
330 			enum bna_llport_event);
331 bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
332 			enum bna_llport_event);
333 bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
334 			enum bna_llport_event);
335 bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
336 			enum bna_llport_event);
337 
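/*
 * Maps FSM state handlers to bna_llport_state values; used by
 * bna_llport_state_get().
 */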
338 static struct bfa_sm_table llport_sm_table[] = {
339 	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
340 	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
341 	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
342 	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
343 	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
344 	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
345 };
346 
347 static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
349 {
350 	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
351 	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
352 }
353 
354 static void
bna_llport_sm_stopped(struct bna_llport *llport,
356 			enum bna_llport_event event)
357 {
358 	switch (event) {
359 	case LLPORT_E_START:
360 		bfa_fsm_set_state(llport, bna_llport_sm_down);
361 		break;
362 
363 	case LLPORT_E_STOP:
364 		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
365 		break;
366 
367 	case LLPORT_E_FAIL:
368 		break;
369 
370 	case LLPORT_E_DOWN:
371 		/* This event is received due to Rx objects failing */
372 		/* No-op */
373 		break;
374 
375 	case LLPORT_E_FWRESP_UP_OK:
376 	case LLPORT_E_FWRESP_DOWN:
377 		/**
378 		 * These events are received due to flushing of mbox when
379 		 * device fails
380 		 */
381 		/* No-op */
382 		break;
383 
384 	default:
385 		bfa_sm_fault(llport->bna, event);
386 	}
387 }
388 
389 static void
bna_llport_sm_down_entry(struct bna_llport *llport)
391 {
392 	bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
393 }
394 
395 static void
bna_llport_sm_down(struct bna_llport *llport,
397 			enum bna_llport_event event)
398 {
399 	switch (event) {
400 	case LLPORT_E_STOP:
401 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
402 		break;
403 
404 	case LLPORT_E_FAIL:
405 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
406 		break;
407 
408 	case LLPORT_E_UP:
409 		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
410 		bna_fw_llport_up(llport);
411 		break;
412 
413 	default:
414 		bfa_sm_fault(llport->bna, event);
415 	}
416 }
417 
418 static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
420 {
421 	BUG_ON(!llport_can_be_up(llport));
422 	/**
423 	 * NOTE: Do not call bna_fw_llport_up() here. That will over step
424 	 * mbox due to down_resp_wait -> up_resp_wait transition on event
425 	 * LLPORT_E_UP
426 	 */
427 }
428 
429 static void
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
431 			enum bna_llport_event event)
432 {
433 	switch (event) {
434 	case LLPORT_E_STOP:
435 		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
436 		break;
437 
438 	case LLPORT_E_FAIL:
439 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
440 		break;
441 
442 	case LLPORT_E_DOWN:
443 		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
444 		break;
445 
446 	case LLPORT_E_FWRESP_UP_OK:
447 		bfa_fsm_set_state(llport, bna_llport_sm_up);
448 		break;
449 
450 	case LLPORT_E_FWRESP_UP_FAIL:
451 		bfa_fsm_set_state(llport, bna_llport_sm_down);
452 		break;
453 
454 	case LLPORT_E_FWRESP_DOWN:
455 		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
456 		bna_fw_llport_up(llport);
457 		break;
458 
459 	default:
460 		bfa_sm_fault(llport->bna, event);
461 	}
462 }
463 
464 static void
bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
466 {
467 	/**
468 	 * NOTE: Do not call bna_fw_llport_down() here. That will over step
469 	 * mbox due to up_resp_wait -> down_resp_wait transition on event
470 	 * LLPORT_E_DOWN
471 	 */
472 }
473 
474 static void
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
476 			enum bna_llport_event event)
477 {
478 	switch (event) {
479 	case LLPORT_E_STOP:
480 		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
481 		break;
482 
483 	case LLPORT_E_FAIL:
484 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
485 		break;
486 
487 	case LLPORT_E_UP:
488 		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
489 		break;
490 
491 	case LLPORT_E_FWRESP_UP_OK:
492 		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
493 		bna_fw_llport_down(llport);
494 		break;
495 
496 	case LLPORT_E_FWRESP_UP_FAIL:
497 	case LLPORT_E_FWRESP_DOWN:
498 		bfa_fsm_set_state(llport, bna_llport_sm_down);
499 		break;
500 
501 	default:
502 		bfa_sm_fault(llport->bna, event);
503 	}
504 }
505 
506 static void
bna_llport_sm_up_entry(struct bna_llport *llport)
508 {
509 }
510 
511 static void
bna_llport_sm_up(struct bna_llport *llport,
513 			enum bna_llport_event event)
514 {
515 	switch (event) {
516 	case LLPORT_E_STOP:
517 		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
518 		bna_fw_llport_down(llport);
519 		break;
520 
521 	case LLPORT_E_FAIL:
522 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
523 		break;
524 
525 	case LLPORT_E_DOWN:
526 		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
527 		bna_fw_llport_down(llport);
528 		break;
529 
530 	default:
531 		bfa_sm_fault(llport->bna, event);
532 	}
533 }
534 
535 static void
bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
537 {
538 }
539 
540 static void
bna_llport_sm_last_resp_wait(struct bna_llport *llport,
542 			enum bna_llport_event event)
543 {
544 	switch (event) {
545 	case LLPORT_E_FAIL:
546 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
547 		break;
548 
549 	case LLPORT_E_DOWN:
550 		/**
551 		 * This event is received due to Rx objects stopping in
552 		 * parallel to llport
553 		 */
554 		/* No-op */
555 		break;
556 
557 	case LLPORT_E_FWRESP_UP_OK:
558 		/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
559 		bna_fw_llport_down(llport);
560 		break;
561 
562 	case LLPORT_E_FWRESP_UP_FAIL:
563 	case LLPORT_E_FWRESP_DOWN:
564 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
565 		break;
566 
567 	default:
568 		bfa_sm_fault(llport->bna, event);
569 	}
570 }
571 
572 static void
bna_fw_llport_admin_up(struct bna_llport *llport)
574 {
575 	struct bfi_ll_port_admin_req ll_req;
576 
577 	memset(&ll_req, 0, sizeof(ll_req));
578 	ll_req.mh.msg_class = BFI_MC_LL;
579 	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
580 	ll_req.mh.mtag.h2i.lpu_id = 0;
581 
582 	ll_req.up = BNA_STATUS_T_ENABLED;
583 
584 	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
585 			bna_fw_cb_llport_up, llport);
586 
587 	bna_mbox_send(llport->bna, &llport->mbox_qe);
588 }
589 
590 static void
bna_fw_llport_up(struct bna_llport *llport)
592 {
593 	if (llport->type == BNA_PORT_T_REGULAR)
594 		bna_fw_llport_admin_up(llport);
595 }
596 
597 static void
bna_fw_cb_llport_up(void *arg, int status)
599 {
600 	struct bna_llport *llport = (struct bna_llport *)arg;
601 
602 	bfa_q_qe_init(&llport->mbox_qe.qe);
603 	if (status == BFI_LL_CMD_FAIL) {
604 		if (llport->type == BNA_PORT_T_REGULAR)
605 			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
606 		else
607 			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
608 		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
609 	} else
610 		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
611 }
612 
613 static void
bna_fw_llport_admin_down(struct bna_llport *llport)
615 {
616 	struct bfi_ll_port_admin_req ll_req;
617 
618 	memset(&ll_req, 0, sizeof(ll_req));
619 	ll_req.mh.msg_class = BFI_MC_LL;
620 	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
621 	ll_req.mh.mtag.h2i.lpu_id = 0;
622 
623 	ll_req.up = BNA_STATUS_T_DISABLED;
624 
625 	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
626 			bna_fw_cb_llport_down, llport);
627 
628 	bna_mbox_send(llport->bna, &llport->mbox_qe);
629 }
630 
631 static void
bna_fw_llport_down(struct bna_llport *llport)
633 {
634 	if (llport->type == BNA_PORT_T_REGULAR)
635 		bna_fw_llport_admin_down(llport);
636 }
637 
638 static void
bna_fw_cb_llport_down(void *arg, int status)
640 {
641 	struct bna_llport *llport = (struct bna_llport *)arg;
642 
643 	bfa_q_qe_init(&llport->mbox_qe.qe);
644 	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
645 }
646 
647 static void
bna_port_cb_llport_stopped(struct bna_port *port,
649 				enum bna_cb_status status)
650 {
651 	bfa_wc_down(&port->chld_stop_wc);
652 }
653 
654 static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
656 {
657 	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
658 	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
659 	llport->type = BNA_PORT_T_REGULAR;
660 	llport->bna = bna;
661 
662 	llport->link_status = BNA_LINK_DOWN;
663 
664 	llport->rx_started_count = 0;
665 
666 	llport->stop_cbfn = NULL;
667 
668 	bfa_q_qe_init(&llport->mbox_qe.qe);
669 
670 	bfa_fsm_set_state(llport, bna_llport_sm_stopped);
671 }
672 
673 static void
bna_llport_uninit(struct bna_llport *llport)
675 {
676 	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
677 	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
678 
679 	llport->bna = NULL;
680 }
681 
682 static void
bna_llport_start(struct bna_llport *llport)
684 {
685 	bfa_fsm_send_event(llport, LLPORT_E_START);
686 }
687 
688 static void
bna_llport_stop(struct bna_llport *llport)
690 {
691 	llport->stop_cbfn = bna_port_cb_llport_stopped;
692 
693 	bfa_fsm_send_event(llport, LLPORT_E_STOP);
694 }
695 
696 static void
bna_llport_fail(struct bna_llport *llport)
698 {
699 	/* Reset the physical port status to enabled */
700 	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
701 	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
702 }
703 
704 static int
bna_llport_state_get(struct bna_llport *llport)
706 {
707 	return bfa_sm_to_state(llport_sm_table, llport->fsm);
708 }
709 
710 void
bna_llport_rx_started(struct bna_llport *llport)
712 {
713 	llport->rx_started_count++;
714 
715 	if (llport->rx_started_count == 1) {
716 
717 		llport->flags |= BNA_LLPORT_F_RX_STARTED;
718 
719 		if (llport_can_be_up(llport))
720 			bfa_fsm_send_event(llport, LLPORT_E_UP);
721 	}
722 }
723 
724 void
bna_llport_rx_stopped(struct bna_llport *llport)
726 {
727 	int llport_up = llport_is_up(llport);
728 
729 	llport->rx_started_count--;
730 
731 	if (llport->rx_started_count == 0) {
732 
733 		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
734 
735 		if (llport_up)
736 			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
737 	}
738 }
739 
740 /**
741  * PORT
742  */
743 #define bna_port_chld_start(port)\
744 do {\
745 	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
746 					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
747 	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
748 					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
749 	bna_llport_start(&(port)->llport);\
750 	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
751 	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
752 } while (0)
753 
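/*
 * Stopping the port's children (llport, Tx and Rx) is asynchronous:
 * the wait counter is raised once per child and each child's stop
 * callback drops it, so bna_port_cb_chld_stopped() runs only after
 * all three have stopped.
 */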
754 #define bna_port_chld_stop(port)\
755 do {\
756 	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
757 					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
758 	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
759 					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
760 	bfa_wc_up(&(port)->chld_stop_wc);\
761 	bfa_wc_up(&(port)->chld_stop_wc);\
762 	bfa_wc_up(&(port)->chld_stop_wc);\
763 	bna_llport_stop(&(port)->llport);\
764 	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
765 	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
766 } while (0)
767 
768 #define bna_port_chld_fail(port)\
769 do {\
770 	bna_llport_fail(&(port)->llport);\
771 	bna_tx_mod_fail(&(port)->bna->tx_mod);\
772 	bna_rx_mod_fail(&(port)->bna->rx_mod);\
773 } while (0)
774 
775 #define bna_port_rx_start(port)\
776 do {\
777 	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
778 					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
779 	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
780 } while (0)
781 
782 #define bna_port_rx_stop(port)\
783 do {\
784 	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
785 					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
786 	bfa_wc_up(&(port)->chld_stop_wc);\
787 	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
788 } while (0)
789 
790 #define call_port_stop_cbfn(port, status)\
791 do {\
792 	if ((port)->stop_cbfn)\
793 		(port)->stop_cbfn((port)->stop_cbarg, status);\
794 	(port)->stop_cbfn = NULL;\
795 	(port)->stop_cbarg = NULL;\
796 } while (0)
797 
798 #define call_port_pause_cbfn(port, status)\
799 do {\
800 	if ((port)->pause_cbfn)\
801 		(port)->pause_cbfn((port)->bna->bnad, status);\
802 	(port)->pause_cbfn = NULL;\
803 } while (0)
804 
805 #define call_port_mtu_cbfn(port, status)\
806 do {\
807 	if ((port)->mtu_cbfn)\
808 		(port)->mtu_cbfn((port)->bna->bnad, status);\
809 	(port)->mtu_cbfn = NULL;\
810 } while (0)
811 
812 static void bna_fw_pause_set(struct bna_port *port);
813 static void bna_fw_cb_pause_set(void *arg, int status);
814 static void bna_fw_mtu_set(struct bna_port *port);
815 static void bna_fw_cb_mtu_set(void *arg, int status);
816 
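/*
 * Port bring-up sequence: configure the MTU, then the pause settings,
 * then start the children (llport, Tx, Rx).  Pause/MTU changes that
 * arrive while a firmware response is pending are latched in
 * port->flags and replayed when the response comes back.
 */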
817 enum bna_port_event {
818 	PORT_E_START			= 1,
819 	PORT_E_STOP			= 2,
820 	PORT_E_FAIL			= 3,
821 	PORT_E_PAUSE_CFG		= 4,
822 	PORT_E_MTU_CFG			= 5,
823 	PORT_E_CHLD_STOPPED		= 6,
824 	PORT_E_FWRESP_PAUSE		= 7,
825 	PORT_E_FWRESP_MTU		= 8
826 };
827 
828 enum bna_port_state {
829 	BNA_PORT_STOPPED		= 1,
830 	BNA_PORT_MTU_INIT_WAIT		= 2,
831 	BNA_PORT_PAUSE_INIT_WAIT	= 3,
832 	BNA_PORT_LAST_RESP_WAIT		= 4,
833 	BNA_PORT_STARTED		= 5,
834 	BNA_PORT_PAUSE_CFG_WAIT		= 6,
835 	BNA_PORT_RX_STOP_WAIT		= 7,
836 	BNA_PORT_MTU_CFG_WAIT 		= 8,
837 	BNA_PORT_CHLD_STOP_WAIT		= 9
838 };
839 
840 bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
841 			enum bna_port_event);
842 bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
843 			enum bna_port_event);
844 bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
845 			enum bna_port_event);
846 bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
847 			enum bna_port_event);
848 bfa_fsm_state_decl(bna_port, started, struct bna_port,
849 			enum bna_port_event);
850 bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
851 			enum bna_port_event);
852 bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
853 			enum bna_port_event);
854 bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
855 			enum bna_port_event);
856 bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
857 			enum bna_port_event);
858 
859 static struct bfa_sm_table port_sm_table[] = {
860 	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
861 	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
862 	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
863 	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
864 	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
865 	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
866 	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
867 	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
868 	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
869 };
870 
871 static void
bna_port_sm_stopped_entry(struct bna_port *port)
873 {
874 	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
875 	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
876 	call_port_stop_cbfn(port, BNA_CB_SUCCESS);
877 }
878 
879 static void
bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
881 {
882 	switch (event) {
883 	case PORT_E_START:
884 		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
885 		break;
886 
887 	case PORT_E_STOP:
888 		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
889 		break;
890 
891 	case PORT_E_FAIL:
892 		/* No-op */
893 		break;
894 
895 	case PORT_E_PAUSE_CFG:
896 		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
897 		break;
898 
899 	case PORT_E_MTU_CFG:
900 		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
901 		break;
902 
903 	case PORT_E_CHLD_STOPPED:
904 		/**
905 		 * This event is received due to LLPort, Tx and Rx objects
906 		 * failing
907 		 */
908 		/* No-op */
909 		break;
910 
911 	case PORT_E_FWRESP_PAUSE:
912 	case PORT_E_FWRESP_MTU:
913 		/**
914 		 * These events are received due to flushing of mbox when
915 		 * device fails
916 		 */
917 		/* No-op */
918 		break;
919 
920 	default:
921 		bfa_sm_fault(port->bna, event);
922 	}
923 }
924 
925 static void
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
927 {
928 	bna_fw_mtu_set(port);
929 }
930 
931 static void
bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
933 {
934 	switch (event) {
935 	case PORT_E_STOP:
936 		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
937 		break;
938 
939 	case PORT_E_FAIL:
940 		bfa_fsm_set_state(port, bna_port_sm_stopped);
941 		break;
942 
943 	case PORT_E_PAUSE_CFG:
944 		/* No-op */
945 		break;
946 
947 	case PORT_E_MTU_CFG:
948 		port->flags |= BNA_PORT_F_MTU_CHANGED;
949 		break;
950 
951 	case PORT_E_FWRESP_MTU:
952 		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
953 			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
954 			bna_fw_mtu_set(port);
955 		} else {
956 			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
957 		}
958 		break;
959 
960 	default:
961 		bfa_sm_fault(port->bna, event);
962 	}
963 }
964 
965 static void
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
967 {
968 	bna_fw_pause_set(port);
969 }
970 
971 static void
bna_port_sm_pause_init_wait(struct bna_port *port,
973 				enum bna_port_event event)
974 {
975 	switch (event) {
976 	case PORT_E_STOP:
977 		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
978 		break;
979 
980 	case PORT_E_FAIL:
981 		bfa_fsm_set_state(port, bna_port_sm_stopped);
982 		break;
983 
984 	case PORT_E_PAUSE_CFG:
985 		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
986 		break;
987 
988 	case PORT_E_MTU_CFG:
989 		port->flags |= BNA_PORT_F_MTU_CHANGED;
990 		break;
991 
992 	case PORT_E_FWRESP_PAUSE:
993 		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
994 			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
995 			bna_fw_pause_set(port);
996 		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
997 			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
998 			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
999 		} else {
1000 			bfa_fsm_set_state(port, bna_port_sm_started);
1001 			bna_port_chld_start(port);
1002 		}
1003 		break;
1004 
1005 	default:
1006 		bfa_sm_fault(port->bna, event);
1007 	}
1008 }
1009 
1010 static void
bna_port_sm_last_resp_wait_entry(struct bna_port *port)
1012 {
1013 }
1014 
1015 static void
bna_port_sm_last_resp_wait(struct bna_port *port,
1017 				enum bna_port_event event)
1018 {
1019 	switch (event) {
1020 	case PORT_E_FAIL:
1021 	case PORT_E_FWRESP_PAUSE:
1022 	case PORT_E_FWRESP_MTU:
1023 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1024 		break;
1025 
1026 	default:
1027 		bfa_sm_fault(port->bna, event);
1028 	}
1029 }
1030 
1031 static void
bna_port_sm_started_entry(struct bna_port *port)
1033 {
1034 	/**
1035 	 * NOTE: Do not call bna_port_chld_start() here, since it will be
1036 	 * inadvertently called during pause_cfg_wait->started transition
1037 	 * as well
1038 	 */
1039 	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
1040 	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
1041 }
1042 
1043 static void
bna_port_sm_started(struct bna_port *port,
1045 			enum bna_port_event event)
1046 {
1047 	switch (event) {
1048 	case PORT_E_STOP:
1049 		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
1050 		break;
1051 
1052 	case PORT_E_FAIL:
1053 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1054 		bna_port_chld_fail(port);
1055 		break;
1056 
1057 	case PORT_E_PAUSE_CFG:
1058 		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
1059 		break;
1060 
1061 	case PORT_E_MTU_CFG:
1062 		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
1063 		break;
1064 
1065 	default:
1066 		bfa_sm_fault(port->bna, event);
1067 	}
1068 }
1069 
1070 static void
bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
1072 {
1073 	bna_fw_pause_set(port);
1074 }
1075 
1076 static void
bna_port_sm_pause_cfg_wait(struct bna_port *port,
1078 				enum bna_port_event event)
1079 {
1080 	switch (event) {
1081 	case PORT_E_FAIL:
1082 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1083 		bna_port_chld_fail(port);
1084 		break;
1085 
1086 	case PORT_E_FWRESP_PAUSE:
1087 		bfa_fsm_set_state(port, bna_port_sm_started);
1088 		break;
1089 
1090 	default:
1091 		bfa_sm_fault(port->bna, event);
1092 	}
1093 }
1094 
1095 static void
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
1097 {
1098 	bna_port_rx_stop(port);
1099 }
1100 
1101 static void
bna_port_sm_rx_stop_wait(struct bna_port *port,
1103 				enum bna_port_event event)
1104 {
1105 	switch (event) {
1106 	case PORT_E_FAIL:
1107 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1108 		bna_port_chld_fail(port);
1109 		break;
1110 
1111 	case PORT_E_CHLD_STOPPED:
1112 		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
1113 		break;
1114 
1115 	default:
1116 		bfa_sm_fault(port->bna, event);
1117 	}
1118 }
1119 
1120 static void
bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
1122 {
1123 	bna_fw_mtu_set(port);
1124 }
1125 
1126 static void
bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
1128 {
1129 	switch (event) {
1130 	case PORT_E_FAIL:
1131 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1132 		bna_port_chld_fail(port);
1133 		break;
1134 
1135 	case PORT_E_FWRESP_MTU:
1136 		bfa_fsm_set_state(port, bna_port_sm_started);
1137 		bna_port_rx_start(port);
1138 		break;
1139 
1140 	default:
1141 		bfa_sm_fault(port->bna, event);
1142 	}
1143 }
1144 
1145 static void
bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
1147 {
1148 	bna_port_chld_stop(port);
1149 }
1150 
1151 static void
bna_port_sm_chld_stop_wait(struct bna_port *port,
1153 				enum bna_port_event event)
1154 {
1155 	switch (event) {
1156 	case PORT_E_FAIL:
1157 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1158 		bna_port_chld_fail(port);
1159 		break;
1160 
1161 	case PORT_E_CHLD_STOPPED:
1162 		bfa_fsm_set_state(port, bna_port_sm_stopped);
1163 		break;
1164 
1165 	default:
1166 		bfa_sm_fault(port->bna, event);
1167 	}
1168 }
1169 
1170 static void
bna_fw_pause_set(struct bna_port *port)
1172 {
1173 	struct bfi_ll_set_pause_req ll_req;
1174 
1175 	memset(&ll_req, 0, sizeof(ll_req));
1176 	ll_req.mh.msg_class = BFI_MC_LL;
1177 	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
1178 	ll_req.mh.mtag.h2i.lpu_id = 0;
1179 
1180 	ll_req.tx_pause = port->pause_config.tx_pause;
1181 	ll_req.rx_pause = port->pause_config.rx_pause;
1182 
1183 	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1184 			bna_fw_cb_pause_set, port);
1185 
1186 	bna_mbox_send(port->bna, &port->mbox_qe);
1187 }
1188 
1189 static void
bna_fw_cb_pause_set(void *arg, int status)
1191 {
1192 	struct bna_port *port = (struct bna_port *)arg;
1193 
1194 	bfa_q_qe_init(&port->mbox_qe.qe);
1195 	bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
1196 }
1197 
static void
bna_fw_mtu_set(struct bna_port *port)
1200 {
1201 	struct bfi_ll_mtu_info_req ll_req;
1202 
1203 	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
1204 	ll_req.mtu = htons((u16)port->mtu);
1205 
1206 	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1207 				bna_fw_cb_mtu_set, port);
1208 	bna_mbox_send(port->bna, &port->mbox_qe);
1209 }
1210 
static void
bna_fw_cb_mtu_set(void *arg, int status)
1213 {
1214 	struct bna_port *port = (struct bna_port *)arg;
1215 
1216 	bfa_q_qe_init(&port->mbox_qe.qe);
1217 	bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
1218 }
1219 
1220 static void
bna_port_cb_chld_stopped(void *arg)
1222 {
1223 	struct bna_port *port = (struct bna_port *)arg;
1224 
1225 	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
1226 }
1227 
1228 static void
bna_port_init(struct bna_port *port, struct bna *bna)
1230 {
1231 	port->bna = bna;
1232 	port->flags = 0;
1233 	port->mtu = 0;
1234 	port->type = BNA_PORT_T_REGULAR;
1235 
1236 	port->link_cbfn = bnad_cb_port_link_status;
1237 
1238 	port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
1239 	port->chld_stop_wc.wc_cbarg = port;
1240 	port->chld_stop_wc.wc_count = 0;
1241 
1242 	port->stop_cbfn = NULL;
1243 	port->stop_cbarg = NULL;
1244 
1245 	port->pause_cbfn = NULL;
1246 
1247 	port->mtu_cbfn = NULL;
1248 
1249 	bfa_q_qe_init(&port->mbox_qe.qe);
1250 
1251 	bfa_fsm_set_state(port, bna_port_sm_stopped);
1252 
1253 	bna_llport_init(&port->llport, bna);
1254 }
1255 
1256 static void
bna_port_uninit(struct bna_port *port)
1258 {
1259 	bna_llport_uninit(&port->llport);
1260 
1261 	port->flags = 0;
1262 
1263 	port->bna = NULL;
1264 }
1265 
1266 static int
bna_port_state_get(struct bna_port *port)
1268 {
1269 	return bfa_sm_to_state(port_sm_table, port->fsm);
1270 }
1271 
1272 static void
bna_port_start(struct bna_port *port)
1274 {
1275 	port->flags |= BNA_PORT_F_DEVICE_READY;
1276 	if (port->flags & BNA_PORT_F_ENABLED)
1277 		bfa_fsm_send_event(port, PORT_E_START);
1278 }
1279 
1280 static void
bna_port_stop(struct bna_port *port)
1282 {
1283 	port->stop_cbfn = bna_device_cb_port_stopped;
1284 	port->stop_cbarg = &port->bna->device;
1285 
1286 	port->flags &= ~BNA_PORT_F_DEVICE_READY;
1287 	bfa_fsm_send_event(port, PORT_E_STOP);
1288 }
1289 
1290 static void
bna_port_fail(struct bna_port *port)
1292 {
1293 	port->flags &= ~BNA_PORT_F_DEVICE_READY;
1294 	bfa_fsm_send_event(port, PORT_E_FAIL);
1295 }
1296 
1297 void
bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
1299 {
1300 	bfa_wc_down(&port->chld_stop_wc);
1301 }
1302 
1303 void
bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
1305 {
1306 	bfa_wc_down(&port->chld_stop_wc);
1307 }
1308 
1309 int
bna_port_mtu_get(struct bna_port *port)
1311 {
1312 	return port->mtu;
1313 }
1314 
1315 void
bna_port_enable(struct bna_port *port)
1317 {
1318 	if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
1319 		return;
1320 
1321 	port->flags |= BNA_PORT_F_ENABLED;
1322 
1323 	if (port->flags & BNA_PORT_F_DEVICE_READY)
1324 		bfa_fsm_send_event(port, PORT_E_START);
1325 }
1326 
1327 void
bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
1329 		 void (*cbfn)(void *, enum bna_cb_status))
1330 {
1331 	if (type == BNA_SOFT_CLEANUP) {
1332 		(*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
1333 		return;
1334 	}
1335 
1336 	port->stop_cbfn = cbfn;
1337 	port->stop_cbarg = port->bna->bnad;
1338 
1339 	port->flags &= ~BNA_PORT_F_ENABLED;
1340 
1341 	bfa_fsm_send_event(port, PORT_E_STOP);
1342 }
1343 
1344 void
bna_port_pause_config(struct bna_port *port,
1346 		      struct bna_pause_config *pause_config,
1347 		      void (*cbfn)(struct bnad *, enum bna_cb_status))
1348 {
1349 	port->pause_config = *pause_config;
1350 
1351 	port->pause_cbfn = cbfn;
1352 
1353 	bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
1354 }
1355 
1356 void
bna_port_mtu_set(struct bna_port *port, int mtu,
1358 		 void (*cbfn)(struct bnad *, enum bna_cb_status))
1359 {
1360 	port->mtu = mtu;
1361 
1362 	port->mtu_cbfn = cbfn;
1363 
1364 	bfa_fsm_send_event(port, PORT_E_MTU_CFG);
1365 }
1366 
1367 void
bna_port_mac_get(struct bna_port *port, mac_t *mac)
1369 {
1370 	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
1371 }
1372 
1373 /**
1374  * DEVICE
1375  */
1376 #define enable_mbox_intr(_device)\
1377 do {\
1378 	u32 intr_status;\
1379 	bna_intr_status_get((_device)->bna, intr_status);\
1380 	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
1381 	bna_mbox_intr_enable((_device)->bna);\
1382 } while (0)
1383 
1384 #define disable_mbox_intr(_device)\
1385 do {\
1386 	bna_mbox_intr_disable((_device)->bna);\
1387 	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
1388 } while (0)
1389 
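/* Per-PCI-function register offsets, indexed by PCI function number */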
1390 static const struct bna_chip_regs_offset reg_offset[] =
1391 {{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
1392 	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
1393 {HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
1394 	HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
1395 {HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
1396 	HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
1397 {HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
1398 	HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
1399 };
1400 
1401 enum bna_device_event {
1402 	DEVICE_E_ENABLE			= 1,
1403 	DEVICE_E_DISABLE		= 2,
1404 	DEVICE_E_IOC_READY		= 3,
1405 	DEVICE_E_IOC_FAILED		= 4,
1406 	DEVICE_E_IOC_DISABLED		= 5,
1407 	DEVICE_E_IOC_RESET		= 6,
1408 	DEVICE_E_PORT_STOPPED		= 7,
1409 };
1410 
1411 enum bna_device_state {
1412 	BNA_DEVICE_STOPPED		= 1,
1413 	BNA_DEVICE_IOC_READY_WAIT 	= 2,
1414 	BNA_DEVICE_READY		= 3,
1415 	BNA_DEVICE_PORT_STOP_WAIT 	= 4,
1416 	BNA_DEVICE_IOC_DISABLE_WAIT 	= 5,
1417 	BNA_DEVICE_FAILED		= 6
1418 };
1419 
1420 bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
1421 			enum bna_device_event);
1422 bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
1423 			enum bna_device_event);
1424 bfa_fsm_state_decl(bna_device, ready, struct bna_device,
1425 			enum bna_device_event);
1426 bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
1427 			enum bna_device_event);
1428 bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
1429 			enum bna_device_event);
1430 bfa_fsm_state_decl(bna_device, failed, struct bna_device,
1431 			enum bna_device_event);
1432 
1433 static struct bfa_sm_table device_sm_table[] = {
1434 	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
1435 	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
1436 	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
1437 	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
1438 	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
1439 	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
1440 };
1441 
1442 static void
bna_device_sm_stopped_entry(struct bna_device *device)
1444 {
1445 	if (device->stop_cbfn)
1446 		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
1447 
1448 	device->stop_cbfn = NULL;
1449 	device->stop_cbarg = NULL;
1450 }
1451 
1452 static void
bna_device_sm_stopped(struct bna_device *device,
1454 			enum bna_device_event event)
1455 {
1456 	switch (event) {
1457 	case DEVICE_E_ENABLE:
1458 		if (device->intr_type == BNA_INTR_T_MSIX)
1459 			bna_mbox_msix_idx_set(device);
1460 		bfa_nw_ioc_enable(&device->ioc);
1461 		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
1462 		break;
1463 
1464 	case DEVICE_E_DISABLE:
1465 		bfa_fsm_set_state(device, bna_device_sm_stopped);
1466 		break;
1467 
1468 	case DEVICE_E_IOC_RESET:
1469 		enable_mbox_intr(device);
1470 		break;
1471 
1472 	case DEVICE_E_IOC_FAILED:
1473 		bfa_fsm_set_state(device, bna_device_sm_failed);
1474 		break;
1475 
1476 	default:
1477 		bfa_sm_fault(device->bna, event);
1478 	}
1479 }
1480 
1481 static void
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
1483 {
1484 	/**
1485 	 * Do not call bfa_ioc_enable() here. It must be called in the
1486 	 * previous state due to failed -> ioc_ready_wait transition.
1487 	 */
1488 }
1489 
1490 static void
bna_device_sm_ioc_ready_wait(struct bna_device *device,
1492 				enum bna_device_event event)
1493 {
1494 	switch (event) {
1495 	case DEVICE_E_DISABLE:
1496 		if (device->ready_cbfn)
1497 			device->ready_cbfn(device->ready_cbarg,
1498 						BNA_CB_INTERRUPT);
1499 		device->ready_cbfn = NULL;
1500 		device->ready_cbarg = NULL;
1501 		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1502 		break;
1503 
1504 	case DEVICE_E_IOC_READY:
1505 		bfa_fsm_set_state(device, bna_device_sm_ready);
1506 		break;
1507 
1508 	case DEVICE_E_IOC_FAILED:
1509 		bfa_fsm_set_state(device, bna_device_sm_failed);
1510 		break;
1511 
1512 	case DEVICE_E_IOC_RESET:
1513 		enable_mbox_intr(device);
1514 		break;
1515 
1516 	default:
1517 		bfa_sm_fault(device->bna, event);
1518 	}
1519 }
1520 
1521 static void
bna_device_sm_ready_entry(struct bna_device *device)
1523 {
1524 	bna_mbox_mod_start(&device->bna->mbox_mod);
1525 	bna_port_start(&device->bna->port);
1526 
1527 	if (device->ready_cbfn)
1528 		device->ready_cbfn(device->ready_cbarg,
1529 					BNA_CB_SUCCESS);
1530 	device->ready_cbfn = NULL;
1531 	device->ready_cbarg = NULL;
1532 }
1533 
1534 static void
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
1536 {
1537 	switch (event) {
1538 	case DEVICE_E_DISABLE:
1539 		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
1540 		break;
1541 
1542 	case DEVICE_E_IOC_FAILED:
1543 		bfa_fsm_set_state(device, bna_device_sm_failed);
1544 		break;
1545 
1546 	default:
1547 		bfa_sm_fault(device->bna, event);
1548 	}
1549 }
1550 
1551 static void
bna_device_sm_port_stop_wait_entry(struct bna_device *device)
1553 {
1554 	bna_port_stop(&device->bna->port);
1555 }
1556 
1557 static void
bna_device_sm_port_stop_wait(struct bna_device *device,
1559 				enum bna_device_event event)
1560 {
1561 	switch (event) {
1562 	case DEVICE_E_PORT_STOPPED:
1563 		bna_mbox_mod_stop(&device->bna->mbox_mod);
1564 		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1565 		break;
1566 
1567 	case DEVICE_E_IOC_FAILED:
1568 		disable_mbox_intr(device);
1569 		bna_port_fail(&device->bna->port);
1570 		break;
1571 
1572 	default:
1573 		bfa_sm_fault(device->bna, event);
1574 	}
1575 }
1576 
1577 static void
bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
1579 {
1580 	bfa_nw_ioc_disable(&device->ioc);
1581 }
1582 
1583 static void
bna_device_sm_ioc_disable_wait(struct bna_device *device,
1585 				enum bna_device_event event)
1586 {
1587 	switch (event) {
1588 	case DEVICE_E_IOC_DISABLED:
1589 		disable_mbox_intr(device);
1590 		bfa_fsm_set_state(device, bna_device_sm_stopped);
1591 		break;
1592 
1593 	default:
1594 		bfa_sm_fault(device->bna, event);
1595 	}
1596 }
1597 
1598 static void
bna_device_sm_failed_entry(struct bna_device *device)
1600 {
1601 	disable_mbox_intr(device);
1602 	bna_port_fail(&device->bna->port);
1603 	bna_mbox_mod_stop(&device->bna->mbox_mod);
1604 
1605 	if (device->ready_cbfn)
1606 		device->ready_cbfn(device->ready_cbarg,
1607 					BNA_CB_FAIL);
1608 	device->ready_cbfn = NULL;
1609 	device->ready_cbarg = NULL;
1610 }
1611 
1612 static void
bna_device_sm_failed(struct bna_device *device,
1614 			enum bna_device_event event)
1615 {
1616 	switch (event) {
1617 	case DEVICE_E_DISABLE:
1618 		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1619 		break;
1620 
1621 	case DEVICE_E_IOC_RESET:
1622 		enable_mbox_intr(device);
1623 		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
1624 		break;
1625 
1626 	default:
1627 		bfa_sm_fault(device->bna, event);
1628 	}
1629 }
1630 
1631 /* IOC callback functions */
1632 
1633 static void
bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
1635 {
1636 	struct bna_device *device = (struct bna_device *)dev;
1637 
1638 	if (error)
1639 		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1640 	else
1641 		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
1642 }
1643 
1644 static void
bna_device_cb_iocll_disabled(void *dev)
1646 {
1647 	struct bna_device *device = (struct bna_device *)dev;
1648 
1649 	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
1650 }
1651 
1652 static void
bna_device_cb_iocll_failed(void *dev)
1654 {
1655 	struct bna_device *device = (struct bna_device *)dev;
1656 
1657 	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1658 }
1659 
1660 static void
bna_device_cb_iocll_reset(void *dev)
1662 {
1663 	struct bna_device *device = (struct bna_device *)dev;
1664 
1665 	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
1666 }
1667 
1668 static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
1669 	bna_device_cb_iocll_ready,
1670 	bna_device_cb_iocll_disabled,
1671 	bna_device_cb_iocll_failed,
1672 	bna_device_cb_iocll_reset
1673 };
1674 
1675 /* device */
1676 static void
bna_adv_device_init(struct bna_device *device, struct bna *bna,
1678 		struct bna_res_info *res_info)
1679 {
1680 	u8 *kva;
1681 	u64 dma;
1682 
1683 	device->bna = bna;
1684 
1685 	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1686 
1687 	/**
1688 	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1689 	 * DMA memory.
1690 	 */
1691 	BNA_GET_DMA_ADDR(
1692 		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1693 	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1694 
1695 	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
1696 	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1697 	kva += bfa_nw_cee_meminfo();
1698 	dma += bfa_nw_cee_meminfo();
1699 
1700 }
1701 
1702 static void
bna_device_init(struct bna_device *device, struct bna *bna,
1704 		struct bna_res_info *res_info)
1705 {
1706 	u64 dma;
1707 
1708 	device->bna = bna;
1709 
1710 	/**
1711 	 * Attach IOC and claim:
1712 	 *	1. DMA memory for IOC attributes
1713 	 *	2. Kernel memory for FW trace
1714 	 */
1715 	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
1716 	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
1717 
1718 	BNA_GET_DMA_ADDR(
1719 		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1720 	bfa_nw_ioc_mem_claim(&device->ioc,
1721 		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
1722 			  dma);
1723 
1724 	bna_adv_device_init(device, bna, res_info);
1725 	/*
1726 	 * Initialize mbox_mod only after IOC, so that mbox handler
1727 	 * registration goes through
1728 	 */
1729 	device->intr_type =
1730 		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
1731 	device->vector =
1732 		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
1733 	bna_mbox_mod_init(&bna->mbox_mod, bna);
1734 
1735 	device->ready_cbfn = device->stop_cbfn = NULL;
1736 	device->ready_cbarg = device->stop_cbarg = NULL;
1737 
1738 	bfa_fsm_set_state(device, bna_device_sm_stopped);
1739 }
1740 
1741 static void
bna_device_uninit(struct bna_device *device)
1743 {
1744 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
1745 
1746 	bfa_nw_ioc_detach(&device->ioc);
1747 
1748 	device->bna = NULL;
1749 }
1750 
1751 static void
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
1753 {
1754 	struct bna_device *device = (struct bna_device *)arg;
1755 
1756 	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
1757 }
1758 
1759 static int
bna_device_status_get(struct bna_device *device)
1761 {
1762 	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
1763 }
1764 
1765 void
bna_device_enable(struct bna_device *device)
1767 {
1768 	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
1769 		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
1770 		return;
1771 	}
1772 
1773 	device->ready_cbfn = bnad_cb_device_enabled;
1774 	device->ready_cbarg = device->bna->bnad;
1775 
1776 	bfa_fsm_send_event(device, DEVICE_E_ENABLE);
1777 }
1778 
1779 void
bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
1781 {
1782 	if (type == BNA_SOFT_CLEANUP) {
1783 		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
1784 		return;
1785 	}
1786 
1787 	device->stop_cbfn = bnad_cb_device_disabled;
1788 	device->stop_cbarg = device->bna->bnad;
1789 
1790 	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
1791 }
1792 
1793 static int
bna_device_state_get(struct bna_device *device)
1795 {
1796 	return bfa_sm_to_state(device_sm_table, device->fsm);
1797 }
1798 
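/*
 * Interrupt moderation vector indexed by measured traffic load and by
 * packet-size bias; the bnad code uses these as Rx coalescing values.
 */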
1799 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1800 	{12, 12},
1801 	{6, 10},
1802 	{5, 10},
1803 	{4, 8},
1804 	{3, 6},
1805 	{3, 6},
1806 	{2, 4},
1807 	{1, 2},
1808 };
1809 
1810 /* utils */
1811 
1812 static void
bna_adv_res_req(struct bna_res_info *res_info)
1814 {
1815 	/* DMA memory for COMMON_MODULE */
1816 	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1817 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1818 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1819 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1820 				bfa_nw_cee_meminfo(), PAGE_SIZE);
1821 
1822 	/* Virtual memory for retreiving fw_trc */
1823 	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1824 	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1825 	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
1826 	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
1827 
1828 	/* DMA memory for retreiving stats */
1829 	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1830 	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1831 	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1832 	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1833 				ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
1834 
1835 	/* Virtual memory for soft stats */
1836 	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
1837 	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1838 	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
1839 	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
1840 				sizeof(struct bna_sw_stats);
1841 }
1842 
1843 static void
bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
1845 {
1846 	struct bna_tx *tx;
1847 	struct bna_txq *txq;
1848 	struct bna_rx *rx;
1849 	struct bna_rxp *rxp;
1850 	struct list_head *qe;
1851 	struct list_head *txq_qe;
1852 	struct list_head *rxp_qe;
1853 	struct list_head *mac_qe;
1854 	int i;
1855 
1856 	sw_stats->device_state = bna_device_state_get(&bna->device);
1857 	sw_stats->port_state = bna_port_state_get(&bna->port);
1858 	sw_stats->port_flags = bna->port.flags;
1859 	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
1860 	sw_stats->priority = bna->port.priority;
1861 
1862 	i = 0;
1863 	list_for_each(qe, &bna->tx_mod.tx_active_q) {
1864 		tx = (struct bna_tx *)qe;
1865 		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
1866 		sw_stats->tx_stats[i].tx_flags = tx->flags;
1867 
1868 		sw_stats->tx_stats[i].num_txqs = 0;
1869 		sw_stats->tx_stats[i].txq_bmap[0] = 0;
1870 		sw_stats->tx_stats[i].txq_bmap[1] = 0;
1871 		list_for_each(txq_qe, &tx->txq_q) {
1872 			txq = (struct bna_txq *)txq_qe;
1873 			if (txq->txq_id < 32)
1874 				sw_stats->tx_stats[i].txq_bmap[0] |=
1875 						((u32)1 << txq->txq_id);
1876 			else
1877 				sw_stats->tx_stats[i].txq_bmap[1] |=
1878 						((u32)1 << (txq->txq_id - 32));
1880 			sw_stats->tx_stats[i].num_txqs++;
1881 		}
1882 
1883 		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
1884 
1885 		i++;
1886 	}
1887 	sw_stats->num_active_tx = i;
1888 
1889 	i = 0;
1890 	list_for_each(qe, &bna->rx_mod.rx_active_q) {
1891 		rx = (struct bna_rx *)qe;
1892 		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
1893 		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
1894 
1895 		sw_stats->rx_stats[i].num_rxps = 0;
1896 		sw_stats->rx_stats[i].num_rxqs = 0;
1897 		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
1898 		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
1899 		sw_stats->rx_stats[i].cq_bmap[0] = 0;
1900 		sw_stats->rx_stats[i].cq_bmap[1] = 0;
1901 		list_for_each(rxp_qe, &rx->rxp_q) {
1902 			rxp = (struct bna_rxp *)rxp_qe;
1903 
1904 			sw_stats->rx_stats[i].num_rxqs += 1;
1905 
1906 			if (rxp->type == BNA_RXP_SINGLE) {
1907 				if (rxp->rxq.single.only->rxq_id < 32) {
1908 					sw_stats->rx_stats[i].rxq_bmap[0] |=
1909 					((u32)1 <<
1910 					rxp->rxq.single.only->rxq_id);
1911 				} else {
1912 					sw_stats->rx_stats[i].rxq_bmap[1] |=
1913 					((u32)1 <<
1914 					(rxp->rxq.single.only->rxq_id - 32));
1915 				}
1916 			} else {
1917 				if (rxp->rxq.slr.large->rxq_id < 32) {
1918 					sw_stats->rx_stats[i].rxq_bmap[0] |=
1919 					((u32)1 <<
1920 					rxp->rxq.slr.large->rxq_id);
1921 				} else {
1922 					sw_stats->rx_stats[i].rxq_bmap[1] |=
1923 					((u32)1 <<
1924 					(rxp->rxq.slr.large->rxq_id - 32));
1925 				}
1926 
1927 				if (rxp->rxq.slr.small->rxq_id < 32) {
1928 					sw_stats->rx_stats[i].rxq_bmap[0] |=
1929 					((u32)1 <<
1930 					rxp->rxq.slr.small->rxq_id);
1931 				} else {
1932 					sw_stats->rx_stats[i].rxq_bmap[1] |=
1933 					((u32)1 <<
1934 					(rxp->rxq.slr.small->rxq_id - 32));
1935 				}
1936 				sw_stats->rx_stats[i].num_rxqs += 1;
1937 			}
1938 
1939 			if (rxp->cq.cq_id < 32)
1940 				sw_stats->rx_stats[i].cq_bmap[0] |=
1941 					((u32)1 << rxp->cq.cq_id);
1942 			else
1943 				sw_stats->rx_stats[i].cq_bmap[1] |=
1944 					((u32)1 << (rxp->cq.cq_id - 32));
1945 
1946 			sw_stats->rx_stats[i].num_rxps++;
1947 		}
1948 
1949 		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
1950 		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
1951 		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
1952 
1953 		sw_stats->rx_stats[i].num_active_ucast = 0;
1954 		if (rx->rxf.ucast_active_mac)
1955 			sw_stats->rx_stats[i].num_active_ucast++;
1956 		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
1957 			sw_stats->rx_stats[i].num_active_ucast++;
1958 
1959 		sw_stats->rx_stats[i].num_active_mcast = 0;
1960 		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
1961 			sw_stats->rx_stats[i].num_active_mcast++;
1962 
1963 		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
1964 		sw_stats->rx_stats[i].vlan_filter_status =
1965 						rx->rxf.vlan_filter_status;
1966 		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
1967 				rx->rxf.vlan_filter_table,
1968 				sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
1969 
1970 		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
1971 		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
1972 
1973 		i++;
1974 	}
1975 	sw_stats->num_active_rx = i;
1976 }
1977 
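/*
 * Mailbox callback for a stats request. On success the firmware has DMAed
 * a densely packed snapshot into hw_stats: the fixed counters first, then
 * one bfi_ll_stats_rxf/txf block for each *active* function only. The code
 * below byte-swaps the snapshot, then walks the packed region backwards,
 * scattering each per-function block out to its rxf_stats[i]/txf_stats[i]
 * slot so that data not yet copied is never overwritten.
 */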
1978 static void
1979 bna_fw_cb_stats_get(void *arg, int status)
1980 {
1981 	struct bna *bna = (struct bna *)arg;
1982 	u64 *p_stats;
1983 	int i, count;
1984 	int rxf_count, txf_count;
1985 	u64 rxf_bmap, txf_bmap;
1986 
1987 	bfa_q_qe_init(&bna->mbox_qe.qe);
1988 
1989 	if (status == 0) {
1990 		p_stats = (u64 *)bna->stats.hw_stats;
1991 		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
1992 		for (i = 0; i < count; i++)
1993 			p_stats[i] = cpu_to_be64(p_stats[i]);
1994 
1995 		rxf_count = 0;
1996 		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
1997 			((u64)bna->stats.rxf_bmap[1] << 32);
1998 		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
1999 			if (rxf_bmap & ((u64)1 << i))
2000 				rxf_count++;
2001 
2002 		txf_count = 0;
2003 		txf_bmap = (u64)bna->stats.txf_bmap[0] |
2004 			((u64)bna->stats.txf_bmap[1] << 32);
2005 		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
2006 			if (txf_bmap & ((u64)1 << i))
2007 				txf_count++;
2008 
2009 		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
2010 				((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
2011 				txf_count * sizeof(struct bfi_ll_stats_txf))/
2012 				sizeof(u64));
2013 
2014 		/* Populate the TXF stats from the firmware DMAed copy */
2015 		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
2016 			if (txf_bmap & ((u64)1 << i)) {
2017 				p_stats -= sizeof(struct bfi_ll_stats_txf)/
2018 						sizeof(u64);
2019 				memcpy(&bna->stats.hw_stats->txf_stats[i],
2020 					p_stats,
2021 					sizeof(struct bfi_ll_stats_txf));
2022 			}
2023 
2024 		/* Populate the RXF stats from the firmware DMAed copy */
2025 		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
2026 			if (rxf_bmap & ((u64)1 << i)) {
2027 				p_stats -= sizeof(struct bfi_ll_stats_rxf)/
2028 						sizeof(u64);
2029 				memcpy(&bna->stats.hw_stats->rxf_stats[i],
2030 					p_stats,
2031 					sizeof(struct bfi_ll_stats_rxf));
2032 			}
2033 
2034 		bna_sw_stats_get(bna, bna->stats.sw_stats);
2035 		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
2036 	} else
2037 		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2038 }
2039 
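/*
 * Post a BFI_LL_H2I_STATS_GET_REQ through the mailbox, handing the firmware
 * the host stats DMA address and the currently active RxF/TxF bitmaps; the
 * bitmaps are also cached so the completion handler knows how the reply is
 * packed.
 */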
2040 static void
2041 bna_fw_stats_get(struct bna *bna)
2042 {
2043 	struct bfi_ll_stats_req ll_req;
2044 
2045 	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
2046 	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
2047 
2048 	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
2049 	ll_req.rxf_id_mask[1] =	htonl(bna->rx_mod.rxf_bmap[1]);
2050 	ll_req.txf_id_mask[0] =	htonl(bna->tx_mod.txf_bmap[0]);
2051 	ll_req.txf_id_mask[1] =	htonl(bna->tx_mod.txf_bmap[1]);
2052 
2053 	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
2054 	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
2055 
2056 	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
2057 				bna_fw_cb_stats_get, bna);
2058 	bna_mbox_send(bna, &bna->mbox_qe);
2059 
2060 	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
2061 	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
2062 	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
2063 	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
2064 }
2065 
2066 void
2067 bna_stats_get(struct bna *bna)
2068 {
2069 	if (bna_device_status_get(&bna->device))
2070 		bna_fw_stats_get(bna);
2071 	else
2072 		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2073 }
2074 
2075 /* IB */
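/*
 * Update an IB's coalescing timeout; if the IB is already started, the new
 * value is also folded into the interrupt-acknowledge doorbell value.
 */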
2076 static void
2077 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
2078 {
2079 	ib->ib_config.coalescing_timeo = coalescing_timeo;
2080 
2081 	if (ib->start_count)
2082 		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
2083 				(u32)ib->ib_config.coalescing_timeo, 0);
2084 }
2085 
2086 /* RxF */
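/*
 * Apply the "advanced" parts of an Rx configuration to the RxF: the
 * small/large queue split for SLR, the header-data split parameters for
 * HDS, and the RSS hash setup when RSS is enabled.
 */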
2087 void
2088 bna_rxf_adv_init(struct bna_rxf *rxf,
2089 		struct bna_rx *rx,
2090 		struct bna_rx_config *q_config)
2091 {
2092 	switch (q_config->rxp_type) {
2093 	case BNA_RXP_SINGLE:
2094 		/* No-op */
2095 		break;
2096 	case BNA_RXP_SLR:
2097 		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
2098 		break;
2099 	case BNA_RXP_HDS:
2100 		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
2101 		rxf->hds_cfg.header_size =
2102 				q_config->hds_config.header_size;
2103 		rxf->forced_offset = 0;
2104 		break;
2105 	default:
2106 		break;
2107 	}
2108 
2109 	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
2110 		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
2111 		rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
2112 		rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
2113 		memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
2114 			&q_config->rss_config.toeplitz_hash_key[0],
2115 			sizeof(rxf->rss_cfg.toeplitz_hash_key));
2116 	}
2117 }
2118 
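/*
 * Build a BFI link-layer request that turns the given RxF filter mode on or
 * off and post it through the mailbox; completion is reported via
 * rxf_cb_cam_fltr_mbox_cmd().
 */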
2119 static void
2120 rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
2121 {
2122 	struct bfi_ll_rxf_req req;
2123 
2124 	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
2125 
2126 	req.rxf_id = rxf->rxf_id;
2127 	req.enable = status;
2128 
2129 	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
2130 			rxf_cb_cam_fltr_mbox_cmd, rxf);
2131 
2132 	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
2133 }
2134 
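/*
 * Each rxf_process_* and rxf_clear_* helper below performs at most one
 * configuration step per call: it returns 1 when a mailbox command was
 * posted and 0 when there is nothing left to do, so the caller can iterate
 * step by step as the firmware responses come back. The rxf_reset_* helpers
 * only revert driver bookkeeping and never touch the hardware.
 */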
2135 int
2136 rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
2137 {
2138 	struct bna_mac *mac = NULL;
2139 	struct list_head *qe;
2140 
2141 	/* Add additional MAC entries */
2142 	if (!list_empty(&rxf->ucast_pending_add_q)) {
2143 		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
2144 		bfa_q_qe_init(qe);
2145 		mac = (struct bna_mac *)qe;
2146 		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
2147 		list_add_tail(&mac->qe, &rxf->ucast_active_q);
2148 		return 1;
2149 	}
2150 
2151 	/* Delete MAC addresses previously added */
2152 	if (!list_empty(&rxf->ucast_pending_del_q)) {
2153 		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2154 		bfa_q_qe_init(qe);
2155 		mac = (struct bna_mac *)qe;
2156 		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2157 		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2158 		return 1;
2159 	}
2160 
2161 	return 0;
2162 }
2163 
2164 int
2165 rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
2166 {
2167 	struct bna *bna = rxf->rx->bna;
2168 
2169 	/* Enable/disable promiscuous mode */
2170 	if (is_promisc_enable(rxf->rxmode_pending,
2171 				rxf->rxmode_pending_bitmask)) {
2172 		/* move promisc configuration from pending -> active */
2173 		promisc_inactive(rxf->rxmode_pending,
2174 				rxf->rxmode_pending_bitmask);
2175 		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
2176 
2177 		/* Disable VLAN filter to allow all VLANs */
2178 		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
2179 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2180 				BNA_STATUS_T_ENABLED);
2181 		return 1;
2182 	} else if (is_promisc_disable(rxf->rxmode_pending,
2183 				rxf->rxmode_pending_bitmask)) {
2184 		/* move promisc configuration from pending -> active */
2185 		promisc_inactive(rxf->rxmode_pending,
2186 				rxf->rxmode_pending_bitmask);
2187 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2188 		bna->rxf_promisc_id = BFI_MAX_RXF;
2189 
2190 		/* Revert VLAN filter */
2191 		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2192 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2193 				BNA_STATUS_T_DISABLED);
2194 		return 1;
2195 	}
2196 
2197 	return 0;
2198 }
2199 
2200 int
2201 rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
2202 {
2203 	/* Enable/disable allmulti mode */
2204 	if (is_allmulti_enable(rxf->rxmode_pending,
2205 				rxf->rxmode_pending_bitmask)) {
2206 		/* move allmulti configuration from pending -> active */
2207 		allmulti_inactive(rxf->rxmode_pending,
2208 				rxf->rxmode_pending_bitmask);
2209 		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
2210 
2211 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2212 				BNA_STATUS_T_ENABLED);
2213 		return 1;
2214 	} else if (is_allmulti_disable(rxf->rxmode_pending,
2215 					rxf->rxmode_pending_bitmask)) {
2216 		/* move allmulti configuration from pending -> active */
2217 		allmulti_inactive(rxf->rxmode_pending,
2218 				rxf->rxmode_pending_bitmask);
2219 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2220 
2221 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2222 				BNA_STATUS_T_DISABLED);
2223 		return 1;
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 int
2230 rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
2231 {
2232 	struct bna_mac *mac = NULL;
2233 	struct list_head *qe;
2234 
2235 	/* 1. delete pending ucast entries */
2236 	if (!list_empty(&rxf->ucast_pending_del_q)) {
2237 		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2238 		bfa_q_qe_init(qe);
2239 		mac = (struct bna_mac *)qe;
2240 		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2241 		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2242 		return 1;
2243 	}
2244 
2245 	/* 2. clear active ucast entries; move them to pending_add_q */
2246 	if (!list_empty(&rxf->ucast_active_q)) {
2247 		bfa_q_deq(&rxf->ucast_active_q, &qe);
2248 		bfa_q_qe_init(qe);
2249 		mac = (struct bna_mac *)qe;
2250 		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2251 		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
2252 		return 1;
2253 	}
2254 
2255 	return 0;
2256 }
2257 
2258 int
2259 rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
2260 {
2261 	struct bna *bna = rxf->rx->bna;
2262 
2263 	/* 6. Execute pending promisc mode disable command */
2264 	if (is_promisc_disable(rxf->rxmode_pending,
2265 				rxf->rxmode_pending_bitmask)) {
2266 		/* move promisc configuration from pending -> active */
2267 		promisc_inactive(rxf->rxmode_pending,
2268 				rxf->rxmode_pending_bitmask);
2269 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2270 		bna->rxf_promisc_id = BFI_MAX_RXF;
2271 
2272 		/* Revert VLAN filter */
2273 		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2274 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2275 				BNA_STATUS_T_DISABLED);
2276 		return 1;
2277 	}
2278 
2279 	/* 7. Clear active promisc mode; move it to pending enable */
2280 	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2281 		/* move promisc configuration from active -> pending */
2282 		promisc_enable(rxf->rxmode_pending,
2283 				rxf->rxmode_pending_bitmask);
2284 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2285 
2286 		/* Revert VLAN filter */
2287 		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2288 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2289 				BNA_STATUS_T_DISABLED);
2290 		return 1;
2291 	}
2292 
2293 	return 0;
2294 }
2295 
2296 int
2297 rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
2298 {
2299 	/* 10. Execute pending allmulti mode disable command */
2300 	if (is_allmulti_disable(rxf->rxmode_pending,
2301 				rxf->rxmode_pending_bitmask)) {
2302 		/* move allmulti configuration from pending -> active */
2303 		allmulti_inactive(rxf->rxmode_pending,
2304 				rxf->rxmode_pending_bitmask);
2305 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2306 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2307 				BNA_STATUS_T_DISABLED);
2308 		return 1;
2309 	}
2310 
2311 	/* 11. Clear active allmulti mode; move it to pending enable */
2312 	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2313 		/* move allmulti configuration from active -> pending */
2314 		allmulti_enable(rxf->rxmode_pending,
2315 				rxf->rxmode_pending_bitmask);
2316 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2317 		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2318 				BNA_STATUS_T_DISABLED);
2319 		return 1;
2320 	}
2321 
2322 	return 0;
2323 }
2324 
2325 void
2326 rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
2327 {
2328 	struct list_head *qe;
2329 	struct bna_mac *mac;
2330 
2331 	/* 1. Move active ucast entries to pending_add_q */
2332 	while (!list_empty(&rxf->ucast_active_q)) {
2333 		bfa_q_deq(&rxf->ucast_active_q, &qe);
2334 		bfa_q_qe_init(qe);
2335 		list_add_tail(qe, &rxf->ucast_pending_add_q);
2336 	}
2337 
2338 	/* 2. Throw away delete pending ucast entries */
2339 	while (!list_empty(&rxf->ucast_pending_del_q)) {
2340 		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2341 		bfa_q_qe_init(qe);
2342 		mac = (struct bna_mac *)qe;
2343 		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2344 	}
2345 }
2346 
2347 void
2348 rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
2349 {
2350 	struct bna *bna = rxf->rx->bna;
2351 
2352 	/* 6. Clear pending promisc mode disable */
2353 	if (is_promisc_disable(rxf->rxmode_pending,
2354 				rxf->rxmode_pending_bitmask)) {
2355 		promisc_inactive(rxf->rxmode_pending,
2356 				rxf->rxmode_pending_bitmask);
2357 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2358 		bna->rxf_promisc_id = BFI_MAX_RXF;
2359 	}
2360 
2361 	/* 7. Move promisc mode config from active -> pending */
2362 	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2363 		promisc_enable(rxf->rxmode_pending,
2364 				rxf->rxmode_pending_bitmask);
2365 		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2366 	}
2367 
2368 }
2369 
2370 void
2371 rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
2372 {
2373 	/* 10. Clear pending allmulti mode disable */
2374 	if (is_allmulti_disable(rxf->rxmode_pending,
2375 				rxf->rxmode_pending_bitmask)) {
2376 		allmulti_inactive(rxf->rxmode_pending,
2377 				rxf->rxmode_pending_bitmask);
2378 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2379 	}
2380 
2381 	/* 11. Move allmulti mode config from active -> pending */
2382 	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2383 		allmulti_enable(rxf->rxmode_pending,
2384 				rxf->rxmode_pending_bitmask);
2385 		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2386 	}
2387 }
2388 
2389 /**
2390  * Should only be called by bna_rx_mode_set.
2391  * Helps decide whether a h/w configuration change is needed.
2392  *  Returns:
2393  *	0 = no h/w change
2394  *	1 = need h/w change
2395  */
2396 static int
2397 rxf_promisc_enable(struct bna_rxf *rxf)
2398 {
2399 	struct bna *bna = rxf->rx->bna;
2400 	int ret = 0;
2401 
2402 	/* There cannot be any pending disable command */
2403 
2404 	/* Do nothing if pending enable or already enabled */
2405 	if (is_promisc_enable(rxf->rxmode_pending,
2406 			rxf->rxmode_pending_bitmask) ||
2407 			(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
2408 		/* Schedule enable */
2409 	} else {
2410 		/* Promisc mode should not be active in the system */
2411 		promisc_enable(rxf->rxmode_pending,
2412 				rxf->rxmode_pending_bitmask);
2413 		bna->rxf_promisc_id = rxf->rxf_id;
2414 		ret = 1;
2415 	}
2416 
2417 	return ret;
2418 }
2419 
2420 /**
2421  * Should only be called by bna_rx_mode_set.
2422  * Helps decide whether a h/w configuration change is needed.
2423  *  Returns:
2424  *	0 = no h/w change
2425  *	1 = need h/w change
2426  */
2427 static int
2428 rxf_promisc_disable(struct bna_rxf *rxf)
2429 {
2430 	struct bna *bna = rxf->rx->bna;
2431 	int ret = 0;
2432 
2433 	/* There cannot be any pending disable */
2434 
2435 	/* Turn off pending enable command, if any */
2436 	if (is_promisc_enable(rxf->rxmode_pending,
2437 				rxf->rxmode_pending_bitmask)) {
2438 		/* Promisc mode should not be active */
2439 		/* system promisc state should be pending */
2440 		promisc_inactive(rxf->rxmode_pending,
2441 				rxf->rxmode_pending_bitmask);
2442 		/* Remove the promisc state from the system */
2443 		bna->rxf_promisc_id = BFI_MAX_RXF;
2444 
2445 		/* Schedule disable */
2446 	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2447 		/* Promisc mode should be active in the system */
2448 		promisc_disable(rxf->rxmode_pending,
2449 				rxf->rxmode_pending_bitmask);
2450 		ret = 1;
2451 
2452 	/* Do nothing if already disabled */
2453 	} else {
2454 	}
2455 
2456 	return ret;
2457 }
2458 
2459 /**
2460  * Should only be called by bna_rx_mode_set.
2461  * Helps decide whether a h/w configuration change is needed.
2462  *  Returns:
2463  *	0 = no h/w change
2464  *	1 = need h/w change
2465  */
2466 static int
2467 rxf_allmulti_enable(struct bna_rxf *rxf)
2468 {
2469 	int ret = 0;
2470 
2471 	/* There cannot be any pending disable command */
2472 
2473 	/* Do nothing if pending enable or already enabled */
2474 	if (is_allmulti_enable(rxf->rxmode_pending,
2475 			rxf->rxmode_pending_bitmask) ||
2476 			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
2477 		/* Schedule enable */
2478 	} else {
2479 		allmulti_enable(rxf->rxmode_pending,
2480 				rxf->rxmode_pending_bitmask);
2481 		ret = 1;
2482 	}
2483 
2484 	return ret;
2485 }
2486 
2487 /**
2488  * Should only be called by bna_rx_mode_set.
2489  * Helps decide whether a h/w configuration change is needed.
2490  *  Returns:
2491  *	0 = no h/w change
2492  *	1 = need h/w change
2493  */
2494 static int
2495 rxf_allmulti_disable(struct bna_rxf *rxf)
2496 {
2497 	int ret = 0;
2498 
2499 	/* There cannot be any pending disable */
2500 
2501 	/* Turn off pending enable command, if any */
2502 	if (is_allmulti_enable(rxf->rxmode_pending,
2503 				rxf->rxmode_pending_bitmask)) {
2504 		/* Allmulti mode should not be active */
2505 		allmulti_inactive(rxf->rxmode_pending,
2506 				rxf->rxmode_pending_bitmask);
2507 
2508 	/* Schedule disable */
2509 	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2510 		allmulti_disable(rxf->rxmode_pending,
2511 				rxf->rxmode_pending_bitmask);
2512 		ret = 1;
2513 	}
2514 
2515 	return ret;
2516 }
2517 
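/*
 * Only one RxF in the system may own promiscuous mode at a time; the owner
 * is recorded in bna->rxf_promisc_id and bna_rx_mode_set() fails an enable
 * request coming from any other RxF.
 */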
2518 /* RxF <- bnad */
2519 enum bna_cb_status
2520 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2521 		enum bna_rxmode bitmask,
2522 		void (*cbfn)(struct bnad *, struct bna_rx *,
2523 			     enum bna_cb_status))
2524 {
2525 	struct bna_rxf *rxf = &rx->rxf;
2526 	int need_hw_config = 0;
2527 
2528 	/* Process the commands */
2529 
2530 	if (is_promisc_enable(new_mode, bitmask)) {
2531 		/* If promisc mode is already enabled elsewhere in the system */
2532 		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
2533 			(rx->bna->rxf_promisc_id != rxf->rxf_id))
2534 			goto err_return;
2535 		if (rxf_promisc_enable(rxf))
2536 			need_hw_config = 1;
2537 	} else if (is_promisc_disable(new_mode, bitmask)) {
2538 		if (rxf_promisc_disable(rxf))
2539 			need_hw_config = 1;
2540 	}
2541 
2542 	if (is_allmulti_enable(new_mode, bitmask)) {
2543 		if (rxf_allmulti_enable(rxf))
2544 			need_hw_config = 1;
2545 	} else if (is_allmulti_disable(new_mode, bitmask)) {
2546 		if (rxf_allmulti_disable(rxf))
2547 			need_hw_config = 1;
2548 	}
2549 
2550 	/* Trigger h/w if needed */
2551 
2552 	if (need_hw_config) {
2553 		rxf->cam_fltr_cbfn = cbfn;
2554 		rxf->cam_fltr_cbarg = rx->bna->bnad;
2555 		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2556 	} else if (cbfn)
2557 		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2558 
2559 	return BNA_CB_SUCCESS;
2560 
2561 err_return:
2562 	return BNA_CB_FAIL;
2563 }
2564 
2565 /* RxF <- bnad */
2566 void
2567 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2568 {
2569 	struct bna_rxf *rxf = &rx->rxf;
2570 
2571 	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2572 		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
2573 		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2574 		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2575 	}
2576 }
2577 
2578 /* Rx */
2579 
2580 /* Rx <- bnad */
2581 void
2582 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2583 {
2584 	struct bna_rxp *rxp;
2585 	struct list_head *qe;
2586 
2587 	list_for_each(qe, &rx->rxp_q) {
2588 		rxp = (struct bna_rxp *)qe;
2589 		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2590 		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
2591 	}
2592 }
2593 
2594 /* Rx <- bnad */
2595 void
2596 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2597 {
2598 	int i, j;
2599 
2600 	for (i = 0; i < BNA_LOAD_T_MAX; i++)
2601 		for (j = 0; j < BNA_BIAS_T_MAX; j++)
2602 			bna->rx_mod.dim_vector[i][j] = vector[i][j];
2603 }
2604 
2605 /* Rx <- bnad */
2606 void
2607 bna_rx_dim_update(struct bna_ccb *ccb)
2608 {
2609 	struct bna *bna = ccb->cq->rx->bna;
2610 	u32 load, bias;
2611 	u32 pkt_rt, small_rt, large_rt;
2612 	u8 coalescing_timeo;
2613 
2614 	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2615 		(ccb->pkt_rate.large_pkt_cnt == 0))
2616 		return;
2617 
2618 	/* Arrive at preconfigured coalescing timeo value based on pkt rate */
2619 
2620 	small_rt = ccb->pkt_rate.small_pkt_cnt;
2621 	large_rt = ccb->pkt_rate.large_pkt_cnt;
2622 
2623 	pkt_rt = small_rt + large_rt;
2624 
2625 	if (pkt_rt < BNA_PKT_RATE_10K)
2626 		load = BNA_LOAD_T_LOW_4;
2627 	else if (pkt_rt < BNA_PKT_RATE_20K)
2628 		load = BNA_LOAD_T_LOW_3;
2629 	else if (pkt_rt < BNA_PKT_RATE_30K)
2630 		load = BNA_LOAD_T_LOW_2;
2631 	else if (pkt_rt < BNA_PKT_RATE_40K)
2632 		load = BNA_LOAD_T_LOW_1;
2633 	else if (pkt_rt < BNA_PKT_RATE_50K)
2634 		load = BNA_LOAD_T_HIGH_1;
2635 	else if (pkt_rt < BNA_PKT_RATE_60K)
2636 		load = BNA_LOAD_T_HIGH_2;
2637 	else if (pkt_rt < BNA_PKT_RATE_80K)
2638 		load = BNA_LOAD_T_HIGH_3;
2639 	else
2640 		load = BNA_LOAD_T_HIGH_4;
2641 
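	/*
	 * Column 0 of the DIM vector is used when small packets dominate
	 * (more than twice the large-packet rate), column 1 otherwise.
	 */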
2642 	if (small_rt > (large_rt << 1))
2643 		bias = 0;
2644 	else
2645 		bias = 1;
2646 
2647 	ccb->pkt_rate.small_pkt_cnt = 0;
2648 	ccb->pkt_rate.large_pkt_cnt = 0;
2649 
2650 	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2651 	ccb->rx_coalescing_timeo = coalescing_timeo;
2652 
2653 	/* Set it to IB */
2654 	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
2655 }
2656 
2657 /* Tx */
2658 /* TX <- bnad */
2659 void
2660 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
2661 {
2662 	struct bna_txq *txq;
2663 	struct list_head *qe;
2664 
2665 	list_for_each(qe, &tx->txq_q) {
2666 		txq = (struct bna_txq *)qe;
2667 		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
2668 	}
2669 }
2670 
2671 /*
2672  * Private data
2673  */
2674 
2675 struct bna_ritseg_pool_cfg {
2676 	u32	pool_size;
2677 	u32	pool_entry_size;
2678 };
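/*
 * init_ritseg_pool() is presumably a macro (from bna.h) that instantiates
 * the static ritseg_pool_cfg[BFI_RIT_SEG_TOTAL_POOLS] table consumed by
 * bna_rit_mod_init() below.
 */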
2679 init_ritseg_pool(ritseg_pool_cfg);
2680 
2681 /*
2682  * Private functions
2683  */
2684 static void
2685 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
2686 		  struct bna_res_info *res_info)
2687 {
2688 	int i;
2689 
2690 	ucam_mod->ucmac = (struct bna_mac *)
2691 		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
2692 
2693 	INIT_LIST_HEAD(&ucam_mod->free_q);
2694 	for (i = 0; i < BFI_MAX_UCMAC; i++) {
2695 		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
2696 		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
2697 	}
2698 
2699 	ucam_mod->bna = bna;
2700 }
2701 
2702 static void
2703 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
2704 {
2705 	struct list_head *qe;
2706 	int i = 0;
2707 
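	/*
	 * Only counts the entries still on the free list; the count is not
	 * used further, presumably a leftover sanity check.
	 */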
2708 	list_for_each(qe, &ucam_mod->free_q)
2709 		i++;
2710 
2711 	ucam_mod->bna = NULL;
2712 }
2713 
2714 static void
2715 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
2716 		  struct bna_res_info *res_info)
2717 {
2718 	int i;
2719 
2720 	mcam_mod->mcmac = (struct bna_mac *)
2721 		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
2722 
2723 	INIT_LIST_HEAD(&mcam_mod->free_q);
2724 	for (i = 0; i < BFI_MAX_MCMAC; i++) {
2725 		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
2726 		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
2727 	}
2728 
2729 	mcam_mod->bna = bna;
2730 }
2731 
2732 static void
2733 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
2734 {
2735 	struct list_head *qe;
2736 	int i = 0;
2737 
2738 	list_for_each(qe, &mcam_mod->free_q)
2739 		i++;
2740 
2741 	mcam_mod->bna = NULL;
2742 }
2743 
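/*
 * Carve the flat RIT entry array into fixed-size segments and queue each
 * segment on the pool matching its size; bna_rit_mod_seg_get() hands these
 * segments out on demand.
 */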
2744 static void
2745 bna_rit_mod_init(struct bna_rit_mod *rit_mod,
2746 		struct bna_res_info *res_info)
2747 {
2748 	int i;
2749 	int j;
2750 	int count;
2751 	int offset;
2752 
2753 	rit_mod->rit = (struct bna_rit_entry *)
2754 		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
2755 	rit_mod->rit_segment = (struct bna_rit_segment *)
2756 		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
2757 
2758 	count = 0;
2759 	offset = 0;
2760 	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
2761 		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
2762 		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
2763 			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
2764 			rit_mod->rit_segment[count].max_rit_size =
2765 					ritseg_pool_cfg[i].pool_entry_size;
2766 			rit_mod->rit_segment[count].rit_offset = offset;
2767 			rit_mod->rit_segment[count].rit =
2768 					&rit_mod->rit[offset];
2769 			list_add_tail(&rit_mod->rit_segment[count].qe,
2770 				&rit_mod->rit_seg_pool[i]);
2771 			count++;
2772 			offset += ritseg_pool_cfg[i].pool_entry_size;
2773 		}
2774 	}
2775 }
2776 
2777 static void
2778 bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
2779 {
2780 	struct bna_rit_segment *rit_segment;
2781 	struct list_head *qe;
2782 	int i;
2783 	int j;
2784 
2785 	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
2786 		j = 0;
2787 		list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
2788 			rit_segment = (struct bna_rit_segment *)qe;
2789 			j++;
2790 		}
2791 	}
2792 }
2793 
2794 /*
2795  * Public functions
2796  */
2797 
2798 /* Called during probe(), before calling bna_init() */
2799 void
2800 bna_res_req(struct bna_res_info *res_info)
2801 {
2802 	bna_adv_res_req(res_info);
2803 
2804 	/* DMA memory for retrieving IOC attributes */
2805 	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
2806 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
2807 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
2808 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
2809 				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
2810 
2811 	/* DMA memory for index segment of an IB */
2812 	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2813 	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
2814 	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
2815 				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
2816 	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
2817 
2818 	/* Virtual memory for IB objects - stored by IB module */
2819 	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
2820 	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
2821 								BNA_MEM_T_KVA;
2822 	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
2823 	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
2824 				BFI_MAX_IB * sizeof(struct bna_ib);
2825 
2826 	/* Virtual memory for intr objects - stored by IB module */
2827 	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
2828 	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
2829 								BNA_MEM_T_KVA;
2830 	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
2831 	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
2832 				BFI_MAX_IB * sizeof(struct bna_intr);
2833 
2834 	/* Virtual memory for idx_seg objects - stored by IB module */
2835 	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
2836 	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
2837 								BNA_MEM_T_KVA;
2838 	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
2839 	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
2840 			BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
2841 
2842 	/* Virtual memory for Tx objects - stored by Tx module */
2843 	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
2844 	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
2845 								BNA_MEM_T_KVA;
2846 	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
2847 	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
2848 			BFI_MAX_TXQ * sizeof(struct bna_tx);
2849 
2850 	/* Virtual memory for TxQ - stored by Tx module */
2851 	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
2852 	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
2853 								BNA_MEM_T_KVA;
2854 	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
2855 	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
2856 			BFI_MAX_TXQ * sizeof(struct bna_txq);
2857 
2858 	/* Virtual memory for Rx objects - stored by Rx module */
2859 	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
2860 	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
2861 								BNA_MEM_T_KVA;
2862 	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
2863 	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
2864 			BFI_MAX_RXQ * sizeof(struct bna_rx);
2865 
2866 	/* Virtual memory for RxPath - stored by Rx module */
2867 	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
2868 	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
2869 								BNA_MEM_T_KVA;
2870 	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
2871 	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
2872 			BFI_MAX_RXQ * sizeof(struct bna_rxp);
2873 
2874 	/* Virtual memory for RxQ - stored by Rx module */
2875 	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
2876 	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
2877 								BNA_MEM_T_KVA;
2878 	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
2879 	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
2880 			BFI_MAX_RXQ * sizeof(struct bna_rxq);
2881 
2882 	/* Virtual memory for Unicast MAC address - stored by ucam module */
2883 	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2884 	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
2885 								BNA_MEM_T_KVA;
2886 	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
2887 	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
2888 			BFI_MAX_UCMAC * sizeof(struct bna_mac);
2889 
2890 	/* Virtual memory for Multicast MAC address - stored by mcam module */
2891 	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2892 	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
2893 								BNA_MEM_T_KVA;
2894 	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
2895 	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
2896 			BFI_MAX_MCMAC * sizeof(struct bna_mac);
2897 
2898 	/* Virtual memory for RIT entries */
2899 	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
2900 	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
2901 								BNA_MEM_T_KVA;
2902 	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
2903 	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
2904 			BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
2905 
2906 	/* Virtual memory for RIT segment table */
2907 	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
2908 	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
2909 								BNA_MEM_T_KVA;
2910 	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
2911 	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
2912 			BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
2913 
2914 	/* Interrupt resource for mailbox interrupt */
2915 	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
2916 	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
2917 							BNA_INTR_T_MSIX;
2918 	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
2919 }
2920 
2921 /* Called during probe() */
2922 void
2923 bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
2924 		struct bna_res_info *res_info)
2925 {
2926 	bna->bnad = bnad;
2927 	bna->pcidev = *pcidev;
2928 
2929 	bna->stats.hw_stats = (struct bfi_ll_stats *)
2930 		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
2931 	bna->hw_stats_dma.msb =
2932 		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
2933 	bna->hw_stats_dma.lsb =
2934 		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
2935 	bna->stats.sw_stats = (struct bna_sw_stats *)
2936 		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
2937 
2938 	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
2939 				reg_offset[bna->pcidev.pci_func].page_addr;
2940 	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
2941 				reg_offset[bna->pcidev.pci_func].fn_int_status;
2942 	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
2943 				reg_offset[bna->pcidev.pci_func].fn_int_mask;
2944 
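	/* PCI functions 0-2 use physical port 0, higher-numbered ones port 1 */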
2945 	if (bna->pcidev.pci_func < 3)
2946 		bna->port_num = 0;
2947 	else
2948 		bna->port_num = 1;
2949 
2950 	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
2951 	bna_device_init(&bna->device, bna, res_info);
2952 
2953 	bna_port_init(&bna->port, bna);
2954 
2955 	bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2956 
2957 	bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2958 
2959 	bna_ib_mod_init(&bna->ib_mod, bna, res_info);
2960 
2961 	bna_rit_mod_init(&bna->rit_mod, res_info);
2962 
2963 	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2964 
2965 	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
2966 
2967 	bna->rxf_promisc_id = BFI_MAX_RXF;
2968 
2969 	/* Mbox q element for posting stat request to f/w */
2970 	bfa_q_qe_init(&bna->mbox_qe.qe);
2971 }
2972 
2973 void
2974 bna_uninit(struct bna *bna)
2975 {
2976 	bna_mcam_mod_uninit(&bna->mcam_mod);
2977 
2978 	bna_ucam_mod_uninit(&bna->ucam_mod);
2979 
2980 	bna_rit_mod_uninit(&bna->rit_mod);
2981 
2982 	bna_ib_mod_uninit(&bna->ib_mod);
2983 
2984 	bna_rx_mod_uninit(&bna->rx_mod);
2985 
2986 	bna_tx_mod_uninit(&bna->tx_mod);
2987 
2988 	bna_port_uninit(&bna->port);
2989 
2990 	bna_device_uninit(&bna->device);
2991 
2992 	bna->bnad = NULL;
2993 }
2994 
2995 struct bna_mac *
2996 bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2997 {
2998 	struct list_head *qe;
2999 
3000 	if (list_empty(&ucam_mod->free_q))
3001 		return NULL;
3002 
3003 	bfa_q_deq(&ucam_mod->free_q, &qe);
3004 
3005 	return (struct bna_mac *)qe;
3006 }
3007 
3008 void
3009 bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
3010 {
3011 	list_add_tail(&mac->qe, &ucam_mod->free_q);
3012 }
3013 
3014 struct bna_mac *
3015 bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
3016 {
3017 	struct list_head *qe;
3018 
3019 	if (list_empty(&mcam_mod->free_q))
3020 		return NULL;
3021 
3022 	bfa_q_deq(&mcam_mod->free_q, &qe);
3023 
3024 	return (struct bna_mac *)qe;
3025 }
3026 
3027 void
3028 bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
3029 {
3030 	list_add_tail(&mac->qe, &mcam_mod->free_q);
3031 }
3032 
3033 /**
3034  * Note: This should be called in the same locking context as the call to
3035  * bna_rit_mod_seg_get()
3036  */
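/*
 * A minimal usage sketch (illustrative only, not taken from the driver),
 * done under the same lock as the subsequent bna_rit_mod_seg_get():
 *
 *	if (bna_rit_mod_can_satisfy(&bna->rit_mod, rit_size))
 *		seg = bna_rit_mod_seg_get(&bna->rit_mod, rit_size);
 *	else
 *		... fall back to a smaller RIT size or fail the setup ...
 */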
3037 int
3038 bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
3039 {
3040 	int i;
3041 
3042 	/* Select the pool for seg_size */
3043 	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3044 		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3045 			break;
3046 	}
3047 
3048 	if (i == BFI_RIT_SEG_TOTAL_POOLS)
3049 		return 0;
3050 
3051 	if (list_empty(&rit_mod->rit_seg_pool[i]))
3052 		return 0;
3053 
3054 	return 1;
3055 }
3056 
3057 struct bna_rit_segment *
3058 bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
3059 {
3060 	struct bna_rit_segment *seg;
3061 	struct list_head *qe;
3062 	int i;
3063 
3064 	/* Select the pool for seg_size */
3065 	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3066 		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3067 			break;
3068 	}
3069 
3070 	if (i == BFI_RIT_SEG_TOTAL_POOLS)
3071 		return NULL;
3072 
3073 	if (list_empty(&rit_mod->rit_seg_pool[i]))
3074 		return NULL;
3075 
3076 	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
3077 	seg = (struct bna_rit_segment *)qe;
3078 	bfa_q_qe_init(&seg->qe);
3079 	seg->rit_size = seg_size;
3080 
3081 	return seg;
3082 }
3083 
3084 void
3085 bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
3086 			struct bna_rit_segment *seg)
3087 {
3088 	int i;
3089 
3090 	/* Select the pool for seg->max_rit_size */
3091 	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3092 		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
3093 			break;
3094 	}
3095 
3096 	seg->rit_size = 0;
3097 	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
3098 }
3099