/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	5
#define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);


/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_INITFAILED	= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_PFFAILED		= 9,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 10,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 11,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 12,	/*  timeout			*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
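
/*
 * The table above maps each state handler back to its externally
 * visible bfa_ioc_state value; code elsewhere in the driver resolves
 * the current state with a table lookup along the lines of:
 *
 *	enum bfa_ioc_state state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
 */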

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
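
/*
 * Note that this mapping is many-to-one: internal sub-states that are
 * not interesting to callers collapse into one visible state (e.g.
 * both fwcheck and mismatch report BFA_IOCPF_FWMISMATCH, and both
 * disabling and disabling_sync report BFA_IOCPF_DISABLING).
 */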

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);

		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_INITFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_complete(ioc)) {
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	case IOCPF_E_FWREADY:
		if (bfa_ioc_is_operational(ioc)) {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		} else {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		}
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_ack(ioc);
		iocpf->retry_count++;
		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 *  BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head			*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	WARN_ON(cnt >= BFA_SEM_SPINCNT);
	return BFA_FALSE;
}
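
/*
 * A typical caller pattern for this busy-wait variant (see
 * bfa_ioc_smem_read() below for a real user):
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... access serialized by the semaphore ...
 *		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 *
 * where the final writel() releases the semaphore.
 */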

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
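
/*
 * Unlike the busy-wait bfa_ioc_sem_get() above, this variant never
 * spins: on a miss it arms sem_timer so that the bfa_iocpf_sem_timeout()
 * callback can retry later, and acquisition completes asynchronously by
 * posting IOCPF_E_SEMLOCKED to the IOCPF state machine.
 */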

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
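
/*
 * To summarize the decision tree above: boot fresh firmware when the
 * running image is invalid or the f/w state is UNINIT; wait passively
 * when another function is already INITING; skip the download and
 * replay FWREADY when a matching image is already DISABLED or OP; and
 * boot for every other state.
 */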

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
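
/*
 * The trailing readl() of hfn_mbox_cmd flushes the posted PCI write so
 * the doorbell is visible to the LPU before this function returns.
 * A minimal request built on this helper looks like
 * bfa_ioc_send_disable() below:
 *
 *	struct bfi_ioc_ctrl_req_s req;
 *
 *	bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_send(ioc, &req, sizeof(req));
 */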

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	}
	ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
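
/*
 * Heartbeat monitoring works by sampling the firmware-incremented
 * heartbeat register once per BFA_IOC_HB_TOV interval: if two
 * consecutive samples are equal the firmware is presumed dead and
 * bfa_ioc_recover() is invoked; otherwise the mailbox queue is polled
 * and the timer is re-armed.
 */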

/*
 *	Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
			swab32(boot_env));
}
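
/*
 * SMEM is reached through a sliding page window: PSS_SMEM_PGNUM()
 * picks the page for a linear offset, PSS_SMEM_PGOFF() the offset
 * within it, and the wrap-around check above advances the window
 * register each time loff wraps back to zero.
 */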


/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32			stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
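
/*
 * Together with bfa_ioc_mbox_cmd_pending(), this implements a simple
 * deferred-send scheme: commands that arrive while the h/w mailbox is
 * still owned by firmware sit on cmd_q, and this poll routine (driven
 * from bfa_ioc_hb_check()) drains them one at a time.
 */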
1686 
1687 /*
1688  * Cleanup any pending requests.
1689  */
1690 static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s * ioc)1691 bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1692 {
1693 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1694 	struct bfa_mbox_cmd_s		*cmd;
1695 
1696 	while (!list_empty(&mod->cmd_q))
1697 		bfa_q_deq(&mod->cmd_q, &cmd);
1698 }
1699 
1700 /*
1701  * Read data from SMEM to host through PCI memmap
1702  *
1703  * @param[in]	ioc	memory for IOC
1704  * @param[in]	tbuf	app memory to store data from smem
1705  * @param[in]	soff	smem offset
1706  * @param[in]	sz	size of smem in bytes
1707  */
1708 static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s * ioc,void * tbuf,u32 soff,u32 sz)1709 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1710 {
1711 	u32 pgnum, loff;
1712 	__be32 r32;
1713 	int i, len;
1714 	u32 *buf = tbuf;
1715 
1716 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1717 	loff = PSS_SMEM_PGOFF(soff);
1718 	bfa_trc(ioc, pgnum);
1719 	bfa_trc(ioc, loff);
1720 	bfa_trc(ioc, sz);
1721 
1722 	/*
1723 	 *  Hold semaphore to serialize pll init and fwtrc.
1724 	 */
1725 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1726 		bfa_trc(ioc, 0);
1727 		return BFA_STATUS_FAILED;
1728 	}
1729 
1730 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1731 
1732 	len = sz/sizeof(u32);
1733 	bfa_trc(ioc, len);
1734 	for (i = 0; i < len; i++) {
1735 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1736 		buf[i] = be32_to_cpu(r32);
1737 		loff += sizeof(u32);
1738 
1739 		/*
1740 		 * handle page offset wrap around
1741 		 */
1742 		loff = PSS_SMEM_PGOFF(loff);
1743 		if (loff == 0) {
1744 			pgnum++;
1745 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1746 		}
1747 	}
1748 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1749 			ioc->ioc_regs.host_page_num_fn);
1750 	/*
1751 	 *  release semaphore.
1752 	 */
1753 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1754 
1755 	bfa_trc(ioc, pgnum);
1756 	return BFA_STATUS_OK;
1757 }
1758 
1759 /*
1760  * Clear SMEM data from host through PCI memmap
1761  *
1762  * @param[in]	ioc	memory for IOC
1763  * @param[in]	soff	smem offset
1764  * @param[in]	sz	size of smem in bytes
1765  */
1766 static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s * ioc,u32 soff,u32 sz)1767 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1768 {
1769 	int i, len;
1770 	u32 pgnum, loff;
1771 
1772 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1773 	loff = PSS_SMEM_PGOFF(soff);
1774 	bfa_trc(ioc, pgnum);
1775 	bfa_trc(ioc, loff);
1776 	bfa_trc(ioc, sz);
1777 
1778 	/*
1779 	 *  Hold semaphore to serialize pll init and fwtrc.
1780 	 */
1781 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1782 		bfa_trc(ioc, 0);
1783 		return BFA_STATUS_FAILED;
1784 	}
1785 
1786 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1787 
1788 	len = sz/sizeof(u32); /* len in words */
1789 	bfa_trc(ioc, len);
1790 	for (i = 0; i < len; i++) {
1791 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1792 		loff += sizeof(u32);
1793 
1794 		/*
1795 		 * handle page offset wrap around
1796 		 */
1797 		loff = PSS_SMEM_PGOFF(loff);
1798 		if (loff == 0) {
1799 			pgnum++;
1800 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1801 		}
1802 	}
1803 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1804 			ioc->ioc_regs.host_page_num_fn);
1805 
1806 	/*
1807 	 *  release semaphore.
1808 	 */
1809 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1810 	bfa_trc(ioc, pgnum);
1811 	return BFA_STATUS_OK;
1812 }
1813 
1814 static void
bfa_ioc_fail_notify(struct bfa_ioc_s * ioc)1815 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1816 {
1817 	struct list_head		*qe;
1818 	struct bfa_ioc_hbfail_notify_s	*notify;
1819 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1820 
1821 	/*
1822 	 * Notify driver and common modules registered for notification.
1823 	 */
1824 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
1825 	list_for_each(qe, &ioc->hb_notify_q) {
1826 		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1827 		notify->cbfn(notify->cbarg);
1828 	}
1829 
1830 	bfa_ioc_debug_save_ftrc(ioc);
1831 
1832 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1833 		"Heart Beat of IOC has failed\n");
1834 
1835 }
1836 
1837 static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s * ioc)1838 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1839 {
1840 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1841 	/*
1842 	 * Provide enable completion callback.
1843 	 */
1844 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1845 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1846 		"Running firmware version is incompatible "
1847 		"with the driver version\n");
1848 }
1849 
1850 bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s * ioc)1851 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1852 {
1853 
1854 	/*
1855 	 *  Hold semaphore so that nobody can access the chip during init.
1856 	 */
1857 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1858 
1859 	bfa_ioc_pll_init_asic(ioc);
1860 
1861 	ioc->pllinit = BFA_TRUE;
1862 	/*
1863 	 *  release semaphore.
1864 	 */
1865 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1866 
1867 	return BFA_STATUS_OK;
1868 }
1869 
1870 /*
1871  * Interface used by diag module to do firmware boot with memory test
1872  * as the entry vector.
1873  */
1874 void
bfa_ioc_boot(struct bfa_ioc_s * ioc,u32 boot_type,u32 boot_env)1875 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1876 {
1877 	void __iomem *rb;
1878 
1879 	bfa_ioc_stats(ioc, ioc_boots);
1880 
1881 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1882 		return;
1883 
1884 	/*
1885 	 * Initialize IOC state of all functions on a chip reset.
1886 	 */
1887 	rb = ioc->pcidev.pci_bar_kva;
1888 	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1889 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1890 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1891 	} else {
1892 		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1893 		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1894 	}
1895 
1896 	bfa_ioc_msgflush(ioc);
1897 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
1898 
1899 	/*
1900 	 * Enable interrupts just before starting LPU
1901 	 */
1902 	ioc->cbfn->reset_cbfn(ioc->bfa);
1903 	bfa_ioc_lpu_start(ioc);
1904 }

/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * Read the MBOX msg.
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox + i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * Turn off the mailbox interrupt by clearing the mailbox status.
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc		memory for IOC
 * @param[in]	bfa		driver instance structure
 * @param[in]	cbfn		driver completion callbacks
 * @param[in]	timer_mod	timer module to use for IOC timers
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 * @param[in]	mc	message class for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c.
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attributes
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
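
/*
 * Usage sketch (hypothetical): the driver normally carves this region
 * out of a larger pre-allocated meminfo block; a minimal standalone
 * equivalent using the generic DMA API would be:
 *
 *	dma_addr_t pa;
 *	u8 *kva = dma_alloc_coherent(dev,
 *			sizeof(struct bfi_ioc_attr_s), &pa, GFP_KERNEL);
 *	if (kva)
 *		bfa_ioc_mem_claim(ioc, kva, pa);
 */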

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/*
 * Initialize memory for saving firmware trace. The driver must
 * initialize trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
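
/*
 * Usage sketch (hypothetical): the trace buffer is typically allocated
 * once at attach time, before bfa_ioc_enable():
 *
 *	void *fwsave = kzalloc(BFA_DBG_FWTRC_LEN, GFP_KERNEL);
 *	if (fwsave)
 *		bfa_ioc_debug_memclaim(ioc, fwsave);
 */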

/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	mcfuncs	message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/*
 * Register a mailbox message handler function, to be called by common
 * modules.
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}
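
/*
 * Usage sketch (hypothetical handler): a common module can register
 * for its message class at attach time. The handler signature matches
 * how bfa_ioc_mbox_isr() invokes it below; BFI_MC_PORT is assumed as
 * an example class.
 *
 *	static void port_isr(void *cbarg, struct bfi_mbmsg_s *m)
 *	{
 *		\* dispatch on m->mh.msg_id *\
 *	}
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, port_isr, port);
 */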

/*
 * Queue a mailbox command request to firmware. The command is sent
 * immediately if the mailbox is free; otherwise it is queued and sent
 * later by the poll timer. It is the caller's responsibility to
 * serialize calls.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue the new command.
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If the mailbox is busy, queue the command for the poll timer.
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * Mailbox is free -- send the command to firmware.
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
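
/*
 * Usage sketch: bfa_ioc_send_fwsync() later in this file is a real
 * in-file caller; a minimal command looks like the following. Note
 * that the command may be linked onto the internal queue, so its
 * storage must remain valid until it has been flushed to firmware.
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	req->ioc_class = ioc->ioc_mc;
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */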

/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

/*
 * Return true if IOC is disabled.
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * Return true if the running firmware is mismatched with the driver.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;
	void __iomem *rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}

/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}

#define BFA_MFG_NAME "Brocade"

void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, the model description uses the same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
				!ad_attr->is_mezz;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;

	WARN_ON(ioc->ioc_mc != BFI_MC_LL);
	return BFA_IOC_TYPE_LL;
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	WARN_ON(!chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	/* copy only the literal itself; it may be shorter than the field */
	memcpy(manufacturer, BFA_MFG_NAME, sizeof(BFA_MFG_NAME));
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/*
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}

	return ioc_st;
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Check the IOC type and return the appropriate MAC.
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
	else
		return ioc->attr->mac;
}

mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
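
/*
 * Usage sketch (hypothetical debugfs/ioctl path, 'buf' is a caller
 * buffer of at least BFA_DBG_FWTRC_LEN bytes):
 *
 *	int len = BFA_DBG_FWTRC_LEN;
 *
 *	if (bfa_ioc_debug_fwsave(ioc, buf, &len) == BFA_STATUS_OK)
 *		;	\* first 'len' bytes of 'buf' hold the saved trace *\
 */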

/*
 * Read the current firmware trace from IOC smem.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}

static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}

static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending the fw sync mbox command, wait for it to take
	 * effect. We do not wait for a response because
	 *    1. the fw_sync mbox cmd has no response, and
	 *    2. even if it did, interrupts might not be enabled when
	 *	 this function is called.
	 * So just keep checking whether any mbox cmd is pending and,
	 * after a reasonable number of iterations, go ahead anyway.
	 * It is possible that the firmware has crashed and the mbox
	 * command is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}

/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
				u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * On the first smem read, sync smem before proceeding.
	 * There is no need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
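
/*
 * Usage sketch (hypothetical): dump the whole smem region by calling
 * repeatedly with a cursor. *offset wraps to 0 once the end is
 * reached, which terminates the loop; CHUNK_SZ is a hypothetical
 * caller-chosen chunk size no larger than the caller's buffer.
 *
 *	u32 off = 0;
 *	int chunk;
 *
 *	do {
 *		chunk = CHUNK_SZ;
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &chunk) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		\* consume 'chunk' bytes from 'buf' *\
 *	} while (off != 0);
 */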

/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF +
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF +
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int	tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}

/*
 * BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

/*
 * bfa timer function
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timed-out entries and invoke their callbacks.
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
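
/*
 * Usage sketch (hypothetical): the driver arranges for this to run
 * every BFA_TIMER_FREQ milliseconds, e.g. from a kernel timer or
 * delayed work, under the same lock that protects bfa_timer_begin()
 * and bfa_timer_stop(); 'lock' and 'timer_mod' are placeholders.
 *
 *	spin_lock_irqsave(&lock, flags);
 *	bfa_timer_beat(&timer_mod);
 *	spin_unlock_irqrestore(&lock, flags);
 */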

/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		    void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}
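
/*
 * Usage sketch (hypothetical one-shot timeout; 'my_timeout_cb',
 * 'my_arg', and the 2000 ms value are placeholders):
 *
 *	struct bfa_timer_s tmr;
 *
 *	bfa_timer_begin(mod, &tmr, my_timeout_cb, my_arg, 2000);
 *	...
 *	bfa_timer_stop(&tmr);	\* cancel before it fires *\
 */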

/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
