1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfad_drv.h"
19 #include "bfa_plog.h"
20 #include "bfa_cs.h"
21 #include "bfa_modules.h"
22 
23 BFA_TRC_FILE(HAL, FCXP);
24 BFA_MODULE(fcxp);
25 BFA_MODULE(sgpg);
26 BFA_MODULE(lps);
27 BFA_MODULE(fcport);
28 BFA_MODULE(rport);
29 BFA_MODULE(uf);
30 
31 /*
32  * LPS related definitions
33  */
34 #define BFA_LPS_MIN_LPORTS      (1)
35 #define BFA_LPS_MAX_LPORTS      (256)
36 
37 /*
38  * Maximum Vports supported per physical port or vf.
39  */
40 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
41 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
42 
43 
44 /*
45  * FC PORT related definitions
46  */
47 /*
48  * The port is considered disabled if corresponding physical port or IOC are
49  * disabled explicitly
50  */
51 #define BFA_PORT_IS_DISABLED(bfa) \
52 	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
54 
55 /*
56  * BFA port state machine events
57  */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware link down event	*/
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
};
69 
70 /*
71  * BFA port link notification state machine events
72  */
73 
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  link up reported	*/
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  link down reported	*/
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  notification done	*/
};
79 
80 /*
81  * RPORT related definitions
82  */
/*
 * Deliver the rport-offline notification: call the FCS callback
 * synchronously when an FCS instance is attached, otherwise defer the
 * notification through the HAL completion-callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)

/*
 * Deliver the rport-online notification; same synchronous/deferred split
 * as bfa_rport_offline_cb() above.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
100 
101 /*
102  * forward declarations FCXP related functions
103  */
104 static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
105 static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
106 				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
107 static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
108 				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
109 static void	bfa_fcxp_qresume(void *cbarg);
110 static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
111 				struct bfi_fcxp_send_req_s *send_req);
112 
113 /*
114  * forward declarations for LPS functions
115  */
116 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
117 				u32 *dm_len);
118 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
119 				struct bfa_iocfc_cfg_s *cfg,
120 				struct bfa_meminfo_s *meminfo,
121 				struct bfa_pcidev_s *pcidev);
122 static void bfa_lps_detach(struct bfa_s *bfa);
123 static void bfa_lps_start(struct bfa_s *bfa);
124 static void bfa_lps_stop(struct bfa_s *bfa);
125 static void bfa_lps_iocdisable(struct bfa_s *bfa);
126 static void bfa_lps_login_rsp(struct bfa_s *bfa,
127 				struct bfi_lps_login_rsp_s *rsp);
128 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
129 				struct bfi_lps_logout_rsp_s *rsp);
130 static void bfa_lps_reqq_resume(void *lps_arg);
131 static void bfa_lps_free(struct bfa_lps_s *lps);
132 static void bfa_lps_send_login(struct bfa_lps_s *lps);
133 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
134 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
135 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
136 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
137 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
138 
139 /*
140  * forward declaration for LPS state machine
141  */
142 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
143 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
144 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
145 					event);
146 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
147 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
148 					enum bfa_lps_event event);
149 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
150 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
151 					event);
152 
153 /*
154  * forward declaration for FC Port functions
155  */
156 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
157 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
158 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
159 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
160 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
161 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
162 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
163 			enum bfa_port_linkstate event, bfa_boolean_t trunk);
164 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
165 				enum bfa_port_linkstate event);
166 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
167 static void bfa_fcport_stats_get_timeout(void *cbarg);
168 static void bfa_fcport_stats_clr_timeout(void *cbarg);
169 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
170 
171 /*
172  * forward declaration for FC PORT state machine
173  */
174 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
175 					enum bfa_fcport_sm_event event);
176 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
177 					enum bfa_fcport_sm_event event);
178 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
179 					enum bfa_fcport_sm_event event);
180 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
181 					enum bfa_fcport_sm_event event);
182 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
183 					enum bfa_fcport_sm_event event);
184 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
185 					enum bfa_fcport_sm_event event);
186 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
187 					enum bfa_fcport_sm_event event);
188 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
189 					enum bfa_fcport_sm_event event);
190 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
191 					enum bfa_fcport_sm_event event);
192 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
193 					enum bfa_fcport_sm_event event);
194 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
195 					enum bfa_fcport_sm_event event);
196 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
197 					enum bfa_fcport_sm_event event);
198 
199 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
200 					enum bfa_fcport_ln_sm_event event);
201 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
202 					enum bfa_fcport_ln_sm_event event);
203 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
204 					enum bfa_fcport_ln_sm_event event);
205 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
206 					enum bfa_fcport_ln_sm_event event);
207 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
208 					enum bfa_fcport_ln_sm_event event);
209 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
210 					enum bfa_fcport_ln_sm_event event);
211 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
212 					enum bfa_fcport_ln_sm_event event);
213 
/*
 * Map each FC port state-machine handler to its externally visible
 * bfa_port_state value (looked up via the bfa_sm_table mechanism).
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
228 
229 
230 /*
231  * forward declaration for RPORT related functions
232  */
233 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
234 static void		bfa_rport_free(struct bfa_rport_s *rport);
235 static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
236 static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
237 static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
238 static void		__bfa_cb_rport_online(void *cbarg,
239 						bfa_boolean_t complete);
240 static void		__bfa_cb_rport_offline(void *cbarg,
241 						bfa_boolean_t complete);
242 
243 /*
244  * forward declaration for RPORT state machine
245  */
246 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
247 					enum bfa_rport_event event);
248 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
249 					enum bfa_rport_event event);
250 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
251 					enum bfa_rport_event event);
252 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
253 					enum bfa_rport_event event);
254 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
255 					enum bfa_rport_event event);
256 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
257 					enum bfa_rport_event event);
258 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
259 					enum bfa_rport_event event);
260 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
261 					enum bfa_rport_event event);
262 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
263 					enum bfa_rport_event event);
264 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
265 					enum bfa_rport_event event);
266 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
267 					enum bfa_rport_event event);
268 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
269 					enum bfa_rport_event event);
270 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
271 					enum bfa_rport_event event);
272 
273 /*
274  * PLOG related definitions
275  */
276 static int
plkd_validate_logrec(struct bfa_plog_rec_s * pl_rec)277 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
278 {
279 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
280 		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
281 		return 1;
282 
283 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
284 		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
285 		return 1;
286 
287 	return 0;
288 }
289 
290 static u64
bfa_get_log_time(void)291 bfa_get_log_time(void)
292 {
293 	u64 system_time = 0;
294 	struct timeval tv;
295 	do_gettimeofday(&tv);
296 
297 	/* We are interested in seconds only. */
298 	system_time = tv.tv_sec;
299 	return system_time;
300 }
301 
/*
 * Append a record to the port-log ring buffer.  Invalid records are
 * dropped with a warning; when the ring wraps, the oldest record is
 * overwritten (head is pushed ahead of tail).
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	/* Copy the caller's record into the ring slot, then timestamp it. */
	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring full: advance head so it keeps pointing at the oldest entry. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
328 
329 void
bfa_plog_init(struct bfa_plog_s * plog)330 bfa_plog_init(struct bfa_plog_s *plog)
331 {
332 	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
333 
334 	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
335 	plog->head = plog->tail = 0;
336 	plog->plog_enabled = 1;
337 }
338 
339 void
bfa_plog_str(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,char * log_str)340 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
341 		enum bfa_plog_eid event,
342 		u16 misc, char *log_str)
343 {
344 	struct bfa_plog_rec_s  lp;
345 
346 	if (plog->plog_enabled) {
347 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
348 		lp.mid = mid;
349 		lp.eid = event;
350 		lp.log_type = BFA_PL_LOG_TYPE_STRING;
351 		lp.misc = misc;
352 		strncpy(lp.log_entry.string_log, log_str,
353 			BFA_PL_STRING_LOG_SZ - 1);
354 		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
355 		bfa_plog_add(plog, &lp);
356 	}
357 }
358 
359 void
bfa_plog_intarr(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,u32 * intarr,u32 num_ints)360 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
361 		enum bfa_plog_eid event,
362 		u16 misc, u32 *intarr, u32 num_ints)
363 {
364 	struct bfa_plog_rec_s  lp;
365 	u32 i;
366 
367 	if (num_ints > BFA_PL_INT_LOG_SZ)
368 		num_ints = BFA_PL_INT_LOG_SZ;
369 
370 	if (plog->plog_enabled) {
371 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
372 		lp.mid = mid;
373 		lp.eid = event;
374 		lp.log_type = BFA_PL_LOG_TYPE_INT;
375 		lp.misc = misc;
376 
377 		for (i = 0; i < num_ints; i++)
378 			lp.log_entry.int_log[i] = intarr[i];
379 
380 		lp.log_num_ints = (u8) num_ints;
381 
382 		bfa_plog_add(plog, &lp);
383 	}
384 }
385 
386 void
bfa_plog_fchdr(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,struct fchs_s * fchdr)387 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
388 			enum bfa_plog_eid event,
389 			u16 misc, struct fchs_s *fchdr)
390 {
391 	struct bfa_plog_rec_s  lp;
392 	u32	*tmp_int = (u32 *) fchdr;
393 	u32	ints[BFA_PL_INT_LOG_SZ];
394 
395 	if (plog->plog_enabled) {
396 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
397 
398 		ints[0] = tmp_int[0];
399 		ints[1] = tmp_int[1];
400 		ints[2] = tmp_int[4];
401 
402 		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
403 	}
404 }
405 
406 void
bfa_plog_fchdr_and_pl(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,struct fchs_s * fchdr,u32 pld_w0)407 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
408 		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
409 		      u32 pld_w0)
410 {
411 	struct bfa_plog_rec_s  lp;
412 	u32	*tmp_int = (u32 *) fchdr;
413 	u32	ints[BFA_PL_INT_LOG_SZ];
414 
415 	if (plog->plog_enabled) {
416 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
417 
418 		ints[0] = tmp_int[0];
419 		ints[1] = tmp_int[1];
420 		ints[2] = tmp_int[4];
421 		ints[3] = pld_w0;
422 
423 		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
424 	}
425 }
426 
427 
428 /*
429  *  fcxp_pvt BFA FCXP private functions
430  */
431 
432 static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s * mod,struct bfa_meminfo_s * mi)433 claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
434 {
435 	u8	       *dm_kva = NULL;
436 	u64	dm_pa;
437 	u32	buf_pool_sz;
438 
439 	dm_kva = bfa_meminfo_dma_virt(mi);
440 	dm_pa = bfa_meminfo_dma_phys(mi);
441 
442 	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
443 
444 	/*
445 	 * Initialize the fcxp req payload list
446 	 */
447 	mod->req_pld_list_kva = dm_kva;
448 	mod->req_pld_list_pa = dm_pa;
449 	dm_kva += buf_pool_sz;
450 	dm_pa += buf_pool_sz;
451 	memset(mod->req_pld_list_kva, 0, buf_pool_sz);
452 
453 	/*
454 	 * Initialize the fcxp rsp payload list
455 	 */
456 	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
457 	mod->rsp_pld_list_kva = dm_kva;
458 	mod->rsp_pld_list_pa = dm_pa;
459 	dm_kva += buf_pool_sz;
460 	dm_pa += buf_pool_sz;
461 	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
462 
463 	bfa_meminfo_dma_virt(mi) = dm_kva;
464 	bfa_meminfo_dma_phys(mi) = dm_pa;
465 }
466 
467 static void
claim_fcxps_mem(struct bfa_fcxp_mod_s * mod,struct bfa_meminfo_s * mi)468 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
469 {
470 	u16	i;
471 	struct bfa_fcxp_s *fcxp;
472 
473 	fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
474 	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
475 
476 	INIT_LIST_HEAD(&mod->fcxp_free_q);
477 	INIT_LIST_HEAD(&mod->fcxp_active_q);
478 
479 	mod->fcxp_list = fcxp;
480 
481 	for (i = 0; i < mod->num_fcxps; i++) {
482 		fcxp->fcxp_mod = mod;
483 		fcxp->fcxp_tag = i;
484 
485 		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
486 		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
487 		fcxp->reqq_waiting = BFA_FALSE;
488 
489 		fcxp = fcxp + 1;
490 	}
491 
492 	bfa_meminfo_kva(mi) = (void *)fcxp;
493 }
494 
495 static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s * cfg,u32 * ndm_len,u32 * dm_len)496 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
497 		 u32 *dm_len)
498 {
499 	u16	num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
500 
501 	if (num_fcxp_reqs == 0)
502 		return;
503 
504 	/*
505 	 * Account for req/rsp payload
506 	 */
507 	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
508 	if (cfg->drvcfg.min_cfg)
509 		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
510 	else
511 		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
512 
513 	/*
514 	 * Account for fcxp structs
515 	 */
516 	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
517 }
518 
519 static void
bfa_fcxp_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_meminfo_s * meminfo,struct bfa_pcidev_s * pcidev)520 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
521 		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
522 {
523 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
524 
525 	memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
526 	mod->bfa = bfa;
527 	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
528 
529 	/*
530 	 * Initialize FCXP request and response payload sizes.
531 	 */
532 	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
533 	if (!cfg->drvcfg.min_cfg)
534 		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
535 
536 	INIT_LIST_HEAD(&mod->wait_q);
537 
538 	claim_fcxp_req_rsp_mem(mod, meminfo);
539 	claim_fcxps_mem(mod, meminfo);
540 }
541 
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
	/* Intentionally empty: the bfa module framework requires the hook. */
}

static void
bfa_fcxp_start(struct bfa_s *bfa)
{
	/* Intentionally empty: no per-start work for the FCXP module. */
}

static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
	/* Intentionally empty: no per-stop work for the FCXP module. */
}
556 
/*
 * IOC is going down: fail every in-flight fcxp with
 * BFA_STATUS_IOC_FAILURE.  Requests without a caller context are
 * completed (and freed) inline; the rest complete through the deferred
 * callback queue.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head	      *qe, *qen;

	/* Safe iteration: completion may unlink the current entry. */
	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
577 
578 static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s * fm)579 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
580 {
581 	struct bfa_fcxp_s *fcxp;
582 
583 	bfa_q_deq(&fm->fcxp_free_q, &fcxp);
584 
585 	if (fcxp)
586 		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
587 
588 	return fcxp;
589 }
590 
591 static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s * fcxp,struct bfa_s * bfa,u8 * use_ibuf,u32 * nr_sgles,bfa_fcxp_get_sgaddr_t * r_sga_cbfn,bfa_fcxp_get_sglen_t * r_sglen_cbfn,struct list_head * r_sgpg_q,int n_sgles,bfa_fcxp_get_sgaddr_t sga_cbfn,bfa_fcxp_get_sglen_t sglen_cbfn)592 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
593 	       struct bfa_s *bfa,
594 	       u8 *use_ibuf,
595 	       u32 *nr_sgles,
596 	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
597 	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
598 	       struct list_head *r_sgpg_q,
599 	       int n_sgles,
600 	       bfa_fcxp_get_sgaddr_t sga_cbfn,
601 	       bfa_fcxp_get_sglen_t sglen_cbfn)
602 {
603 
604 	WARN_ON(bfa == NULL);
605 
606 	bfa_trc(bfa, fcxp->fcxp_tag);
607 
608 	if (n_sgles == 0) {
609 		*use_ibuf = 1;
610 	} else {
611 		WARN_ON(*sga_cbfn == NULL);
612 		WARN_ON(*sglen_cbfn == NULL);
613 
614 		*use_ibuf = 0;
615 		*r_sga_cbfn = sga_cbfn;
616 		*r_sglen_cbfn = sglen_cbfn;
617 
618 		*nr_sgles = n_sgles;
619 
620 		/*
621 		 * alloc required sgpgs
622 		 */
623 		if (n_sgles > BFI_SGE_INLINE)
624 			WARN_ON(1);
625 	}
626 
627 }
628 
/*
 * Initialize an allocated fcxp: record the caller context, then set up
 * the request-side and response-side SG bookkeeping via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* Request direction. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* Response direction. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
655 
/*
 * Release an fcxp.  If an allocation request is waiting (wait_q
 * non-empty), the fcxp is re-initialized with the waiter's parameters and
 * handed over directly; otherwise it returns to the free list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		/* Hand this fcxp straight to the waiting allocator. */
		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* Move the fcxp from the active list back to the free list. */
	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
679 
/*
 * No-op send-completion callback used for discarded fcxps; any response
 * is ignored.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
687 
688 static void
__bfa_fcxp_send_cbfn(void * cbarg,bfa_boolean_t complete)689 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
690 {
691 	struct bfa_fcxp_s *fcxp = cbarg;
692 
693 	if (complete) {
694 		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
695 				fcxp->rsp_status, fcxp->rsp_len,
696 				fcxp->residue_len, &fcxp->rsp_fchs);
697 	} else {
698 		bfa_fcxp_free(fcxp);
699 	}
700 }
701 
/*
 * Firmware send-completion handler.  Byte-swaps the response fields,
 * looks up the owning fcxp by tag and completes it: inline (with free)
 * when there is no caller context, otherwise through the deferred
 * callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Stash the results; completion runs from cb queue. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
751 
752 static void
hal_fcxp_set_local_sges(struct bfi_sge_s * sge,u32 reqlen,u64 req_pa)753 hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
754 {
755 	union bfi_addr_u      sga_zero = { {0} };
756 
757 	sge->sg_len = reqlen;
758 	sge->flags = BFI_SGE_DATA_LAST;
759 	bfa_dma_addr_set(sge[0].sga, req_pa);
760 	bfa_sge_to_be(sge);
761 	sge++;
762 
763 	sge->sga = sga_zero;
764 	sge->sg_len = reqlen;
765 	sge->flags = BFI_SGE_PGDLEN;
766 	bfa_sge_to_be(sge);
767 }
768 
769 static void
hal_fcxp_tx_plog(struct bfa_s * bfa,u32 reqlen,struct bfa_fcxp_s * fcxp,struct fchs_s * fchs)770 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
771 		 struct fchs_s *fchs)
772 {
773 	/*
774 	 * TODO: TX ox_id
775 	 */
776 	if (reqlen > 0) {
777 		if (fcxp->use_ireqbuf) {
778 			u32	pld_w0 =
779 				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
780 
781 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
782 					BFA_PL_EID_TX,
783 					reqlen + sizeof(struct fchs_s), fchs,
784 					pld_w0);
785 		} else {
786 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
787 					BFA_PL_EID_TX,
788 					reqlen + sizeof(struct fchs_s),
789 					fchs);
790 		}
791 	} else {
792 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
793 			       reqlen + sizeof(struct fchs_s), fchs);
794 	}
795 }
796 
797 static void
hal_fcxp_rx_plog(struct bfa_s * bfa,struct bfa_fcxp_s * fcxp,struct bfi_fcxp_send_rsp_s * fcxp_rsp)798 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
799 		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
800 {
801 	if (fcxp_rsp->rsp_len > 0) {
802 		if (fcxp->use_irspbuf) {
803 			u32	pld_w0 =
804 				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
805 
806 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
807 					      BFA_PL_EID_RX,
808 					      (u16) fcxp_rsp->rsp_len,
809 					      &fcxp_rsp->fchs, pld_w0);
810 		} else {
811 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
812 				       BFA_PL_EID_RX,
813 				       (u16) fcxp_rsp->rsp_len,
814 				       &fcxp_rsp->fchs);
815 		}
816 	} else {
817 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
818 			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
819 	}
820 }
821 
822 /*
823  * Handler to resume sending fcxp when space in available in cpe queue.
824  */
static void
bfa_fcxp_qresume(void *cbarg)
{
	/*
	 * Request-queue wait callback: queue space is now available, so
	 * clear the waiting flag and issue the deferred send.
	 */
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
836 
837 /*
838  * Queue fcxp send request to foimrware.
839  */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	/*
	 * Build a BFI_FCXP_H2I_SEND_REQ message in the supplied request
	 * queue entry and produce it to the firmware.  Multi-byte fields
	 * are converted to big-endian for the message.
	 */
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_lpuid(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		/* Fall back to the FC maximum PDU size when unset. */
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		/* No rport context: use defaults. */
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			WARN_ON(fcxp->nreq_sgles != 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
								   0));
		} else {
			WARN_ON(reqi->req_tot_len != 0);
			/*
			 * NOTE(review): this writes rsp_sge while building
			 * the request SGEs — looks like it should be
			 * req_sge; confirm against the firmware interface.
			 */
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			WARN_ON(fcxp->nrsp_sgles != 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
								   0));
		} else {
			WARN_ON(rspi->rsp_maxlen != 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	/* Ring the doorbell; the message is now owned by firmware. */
	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
920 
921 /*
922  * Allocate an FCXP instance to send a response or to send a request
923  * that has a response. Request/response buffers are allocated by caller.
924  *
925  * @param[in]	bfa		BFA bfa instance
926  * @param[in]	nreq_sgles	Number of SG elements required for request
927  *				buffer. 0, if fcxp internal buffers are	used.
928  *				Use bfa_fcxp_get_reqbuf() to get the
929  *				internal req buffer.
930  * @param[in]	req_sgles	SG elements describing request buffer. Will be
931  *				copied in by BFA and hence can be freed on
932  *				return from this function.
933  * @param[in]	get_req_sga	function ptr to be called to get a request SG
934  *				Address (given the sge index).
935  * @param[in]	get_req_sglen	function ptr to be called to get a request SG
936  *				len (given the sge index).
937  * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
938  *				Address (given the sge index).
939  * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
940  *				len (given the sge index).
941  *
942  * @return FCXP instance. NULL on failure.
943  */
struct bfa_fcxp_s *
bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_s *fcxp = NULL;

	WARN_ON(bfa == NULL);

	/* Pull a free fcxp from the module pool; NULL if exhausted. */
	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
	if (fcxp == NULL)
		return NULL;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/* Record caller context and the SG address/length callbacks. */
	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);

	return fcxp;
}
966 
967 /*
968  * Get the internal request buffer pointer
969  *
970  * @param[in]	fcxp	BFA fcxp pointer
971  *
972  * @return		pointer to the internal request buffer
973  */
974 void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s * fcxp)975 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
976 {
977 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
978 	void	*reqbuf;
979 
980 	WARN_ON(fcxp->use_ireqbuf != 1);
981 	reqbuf = ((u8 *)mod->req_pld_list_kva) +
982 		fcxp->fcxp_tag * mod->req_pld_sz;
983 	return reqbuf;
984 }
985 
986 u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s * fcxp)987 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
988 {
989 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
990 
991 	return mod->req_pld_sz;
992 }
993 
994 /*
995  * Get the internal response buffer pointer
996  *
997  * @param[in]	fcxp	BFA fcxp pointer
998  *
 * @return		pointer to the internal response buffer
1000  */
1001 void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s * fcxp)1002 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1003 {
1004 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1005 	void	*rspbuf;
1006 
1007 	WARN_ON(fcxp->use_irspbuf != 1);
1008 
1009 	rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1010 		fcxp->fcxp_tag * mod->rsp_pld_sz;
1011 	return rspbuf;
1012 }
1013 
1014 /*
1015  * Free the BFA FCXP
1016  *
1017  * @param[in]	fcxp			BFA fcxp pointer
1018  *
1019  * @return		void
1020  */
1021 void
bfa_fcxp_free(struct bfa_fcxp_s * fcxp)1022 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1023 {
1024 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1025 
1026 	WARN_ON(fcxp == NULL);
1027 	bfa_trc(mod->bfa, fcxp->fcxp_tag);
1028 	bfa_fcxp_put(fcxp);
1029 }
1030 
1031 /*
1032  * Send a FCXP request
1033  *
1034  * @param[in]	fcxp	BFA fcxp pointer
1035  * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
1036  * @param[in]	vf_id	virtual Fabric ID
1037  * @param[in]	lp_tag	lport tag
1038  * @param[in]	cts	use Continuous sequence
1039  * @param[in]	cos	fc Class of Service
1040  * @param[in]	reqlen	request length, does not include FCHS length
1041  * @param[in]	fchs	fc Header Pointer. The header content will be copied
1042  *			in by BFA.
1043  *
1044  * @param[in]	cbfn	call back function to be called on receiving
1045  *								the response
1046  * @param[in]	cbarg	arg for cbfn
1047  * @param[in]	rsp_timeout
1048  *			response timeout
1049  *
 * @return		void
1051  */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* A NULL cbfn is substituted with bfa_fcxp_null_comp. */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue.
	 * The request info saved above is used when the send is
	 * re-driven later.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1093 
1094 /*
1095  * Abort a BFA FCXP
1096  *
1097  * @param[in]	fcxp	BFA fcxp pointer
1098  *
1099  * @return		void
1100  */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented: warn loudly if anyone calls it. */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1108 
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
	       void *caller, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Only legal while the free pool is actually empty. */
	WARN_ON(!list_empty(&mod->fcxp_free_q));

	/*
	 * Stash every allocation parameter on the wait element so the
	 * alloc can be replayed when an fcxp becomes available.
	 */
	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
1135 
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* The wait element must still be parked on the module wait queue. */
	WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
1144 
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/*
	 * Otherwise detach the caller from any future completion by
	 * substituting bfa_fcxp_null_comp as the send callback.
	 */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1161 
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		/* Firmware completed a send request. */
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		/* Unexpected message id: trace it and warn. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
1175 
1176 u32
bfa_fcxp_get_maxrsp(struct bfa_s * bfa)1177 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1178 {
1179 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1180 
1181 	return mod->rsp_pld_sz;
1182 }
1183 
1184 
1185 /*
1186  *  BFA LPS state machine functions
1187  */
1188 
1189 /*
1190  * Init state -- no login
1191  */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/*
		 * Send the login now if the request queue has room;
		 * otherwise park on the reqq wait list and resume later.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in: complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing is in flight; nothing to undo. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually send out the timeout.
		 * Just ignore.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1240 
1241 /*
1242  * login is in progress -- awaiting response from firmware
1243  */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			/* Login accepted: go online. */
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* Login failed: fall back to init. */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the login originator either way. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* PID update arrives before the FW response: just trace it. */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1293 
1294 /*
1295  * login pending - awaiting space in request queue
1296  */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: the login proceeds. */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Cancel the pending reqq wait before going back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1325 
1326 /*
1327  * login complete
1328  */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send logout now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Push the N2N-assigned PID to FW, waiting on reqq if full. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1374 
/*
 * online, but an N2N PID update is pending -- awaiting space in the
 * request queue
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the deferred PID update. */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Reuse the queued wqe for the logout instead. */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Drop the pending reqq wait on the way back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1416 
1417 /*
1418  * logout in progress - awaiting firmware response
1419  */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Logout acknowledged by firmware: notify and reset. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1440 
1441 /*
1442  * logout pending -- awaiting space in request queue
1443  */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the deferred logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Cancel the pending reqq wait before going back to init. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1465 
1466 
1467 
1468 /*
1469  *  lps_pvt BFA LPS private functions
1470  */
1471 
1472 /*
1473  * return memory requirement
1474  */
1475 static void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s * cfg,u32 * ndm_len,u32 * dm_len)1476 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1477 	u32 *dm_len)
1478 {
1479 	if (cfg->drvcfg.min_cfg)
1480 		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1481 	else
1482 		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1483 }
1484 
1485 /*
1486  * bfa module attach at initialization time
1487  */
1488 static void
bfa_lps_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_meminfo_s * meminfo,struct bfa_pcidev_s * pcidev)1489 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1490 	struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1491 {
1492 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1493 	struct bfa_lps_s	*lps;
1494 	int			i;
1495 
1496 	memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1497 	mod->num_lps = BFA_LPS_MAX_LPORTS;
1498 	if (cfg->drvcfg.min_cfg)
1499 		mod->num_lps = BFA_LPS_MIN_LPORTS;
1500 	else
1501 		mod->num_lps = BFA_LPS_MAX_LPORTS;
1502 	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1503 
1504 	bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1505 
1506 	INIT_LIST_HEAD(&mod->lps_free_q);
1507 	INIT_LIST_HEAD(&mod->lps_active_q);
1508 
1509 	for (i = 0; i < mod->num_lps; i++, lps++) {
1510 		lps->bfa	= bfa;
1511 		lps->lp_tag	= (u8) i;
1512 		lps->reqq	= BFA_REQQ_LPS;
1513 		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1514 		list_add_tail(&lps->qe, &mod->lps_free_q);
1515 	}
1516 }
1517 
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* Empty module hook: LPS performs no detach-time work. */
}
1522 
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* Empty module hook: LPS performs no start-time work. */
}
1527 
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* Empty module hook: LPS performs no stop-time work. */
}
1532 
1533 /*
1534  * IOC in disabled state -- consider all lps offline
1535  */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	/*
	 * Safe iteration: the OFFLINE event may move an lps off the
	 * active queue (e.g. back to the free queue via its SM).
	 * The cast assumes qe is the first member of bfa_lps_s.
	 */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
}
1548 
1549 /*
1550  * Firmware login response
1551  */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->lp_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Login accepted: latch the parameters reported by FW. */
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric rejected the login: keep reason/explanation codes. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Drive the state machine with the firmware response. */
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1596 
1597 /*
1598  * Firmware logout response
1599  */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->lp_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	/* Logout carries no payload to latch; just advance the SM. */
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1611 
1612 /*
1613  * Firmware received a Clear virtual link request (for FCoE)
1614  */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	/* NOTE(review): unlike the rsp handlers, lp_tag is not range
	 * checked here before the lookup. */
	lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
1625 
1626 /*
1627  * Space is available in request queue, resume queueing request to firmware.
1628  */
1629 static void
bfa_lps_reqq_resume(void * lps_arg)1630 bfa_lps_reqq_resume(void *lps_arg)
1631 {
1632 	struct bfa_lps_s	*lps = lps_arg;
1633 
1634 	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1635 }
1636 
1637 /*
1638  * lps is freed -- triggered by vport delete
1639  */
1640 static void
bfa_lps_free(struct bfa_lps_s * lps)1641 bfa_lps_free(struct bfa_lps_s *lps)
1642 {
1643 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1644 
1645 	lps->lp_pid = 0;
1646 	list_del(&lps->qe);
1647 	list_add_tail(&lps->qe, &mod->lps_free_q);
1648 }
1649 
1650 /*
1651  * send login request to firmware
1652  */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s	*m;

	/* The SM only calls this after checking bfa_reqq_full(). */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	/* Copy the login parameters captured on the lps instance. */
	m->lp_tag	= lps->lp_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1674 
1675 /*
1676  * send logout request to firmware
1677  */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	/* The SM only calls this after checking bfa_reqq_full(). */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag    = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1693 
1694 /*
1695  * send n2n pid set request to firmware
1696  */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	/* The SM only calls this after checking bfa_reqq_full(). */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1712 
1713 /*
1714  * Indirect login completion handler for non-fcs
1715  */
1716 static void
bfa_lps_login_comp_cb(void * arg,bfa_boolean_t complete)1717 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1718 {
1719 	struct bfa_lps_s *lps	= arg;
1720 
1721 	if (!complete)
1722 		return;
1723 
1724 	if (lps->fdisc)
1725 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1726 	else
1727 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1728 }
1729 
1730 /*
1731  * Login completion handler -- direct call for fcs, queue for others
1732  */
static void
bfa_lps_login_comp(struct bfa_lps_s *lps)
{
	/* Non-fcs callers get the completion deferred through a cb queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
			lps);
		return;
	}

	/* fcs callers are notified synchronously. */
	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
1747 
1748 /*
1749  * Indirect logout completion handler for non-fcs
1750  */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Only FDISC logouts have a completion callback to deliver. */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
1762 
1763 /*
1764  * Logout completion handler -- direct call for fcs, queue for others
1765  */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	/* Non-fcs callers get the completion deferred through a cb queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	/* Only FDISC logouts have a completion callback to deliver. */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
1777 
1778 /*
1779  * Clear virtual link completion handler for non-fcs
1780  */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
1793 
1794 /*
1795  * Received Clear virtual link event --direct call for fcs,
1796  * queue for others
1797  */
static void
bfa_lps_cvl_event(struct bfa_lps_s *lps)
{
	/* Non-fcs callers get the event deferred through a cb queue. */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
			lps);
		return;
	}

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
1811 
1812 
1813 
1814 /*
1815  *  lps_public BFA LPS public functions
1816  */
1817 
1818 u32
bfa_lps_get_max_vport(struct bfa_s * bfa)1819 bfa_lps_get_max_vport(struct bfa_s *bfa)
1820 {
1821 	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1822 		return BFA_LPS_MAX_VPORTS_SUPP_CT;
1823 	else
1824 		return BFA_LPS_MAX_VPORTS_SUPP_CB;
1825 }
1826 
1827 /*
 * Allocate a lport service tag.
1829  */
struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	/* Dequeue a free lps; lps stays NULL when the pool is empty. */
	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	/* Track it on the active queue and start in the init state. */
	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
1846 
1847 /*
1848  * Free lport service tag. This can be called anytime after an alloc.
1849  * No need to wait for any pending login/logout completions.
1850  */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine decides how to unwind from the current state. */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1856 
1857 /*
1858  * Initiate a lport login.
1859  */
1860 void
bfa_lps_flogi(struct bfa_lps_s * lps,void * uarg,u8 alpa,u16 pdusz,wwn_t pwwn,wwn_t nwwn,bfa_boolean_t auth_en)1861 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1862 	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1863 {
1864 	lps->uarg	= uarg;
1865 	lps->alpa	= alpa;
1866 	lps->pdusz	= pdusz;
1867 	lps->pwwn	= pwwn;
1868 	lps->nwwn	= nwwn;
1869 	lps->fdisc	= BFA_FALSE;
1870 	lps->auth_en	= auth_en;
1871 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1872 }
1873 
1874 /*
1875  * Initiate a lport fdisc login.
1876  */
1877 void
bfa_lps_fdisc(struct bfa_lps_s * lps,void * uarg,u16 pdusz,wwn_t pwwn,wwn_t nwwn)1878 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1879 	wwn_t nwwn)
1880 {
1881 	lps->uarg	= uarg;
1882 	lps->alpa	= 0;
1883 	lps->pdusz	= pdusz;
1884 	lps->pwwn	= pwwn;
1885 	lps->nwwn	= nwwn;
1886 	lps->fdisc	= BFA_TRUE;
1887 	lps->auth_en	= BFA_FALSE;
1888 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1889 }
1890 
1891 
1892 /*
 * Initiate a lport FDISC logout.
1894  */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The state machine sends or defers the LOGO as appropriate. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1900 
1901 
1902 /*
1903  * Return lport services tag given the pid
1904  */
1905 u8
bfa_lps_get_tag_from_pid(struct bfa_s * bfa,u32 pid)1906 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1907 {
1908 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1909 	struct bfa_lps_s	*lps;
1910 	int			i;
1911 
1912 	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1913 		if (lps->lp_pid == pid)
1914 			return lps->lp_tag;
1915 	}
1916 
1917 	/* Return base port tag anyway */
1918 	return 0;
1919 }
1920 
1921 
1922 /*
1923  * return port id assigned to the base lport
1924  */
1925 u32
bfa_lps_get_base_pid(struct bfa_s * bfa)1926 bfa_lps_get_base_pid(struct bfa_s *bfa)
1927 {
1928 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1929 
1930 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1931 }
1932 
1933 /*
1934  * Set PID in case of n2n (which is assigned during PLOGI)
1935  */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, n2n_pid);

	/* Latch the PLOGI-assigned PID and let the SM push it to FW. */
	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1945 
1946 /*
1947  * LPS firmware message class handler.
1948  */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/* Dispatch the firmware message to the matching rsp/event handler. */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unexpected message id: trace it and warn. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
1975 
1976 /*
1977  * FC PORT state machine functions
1978  */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		/* Enable now, or wait for request-queue space. */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2026 
/*
 * Port state: enable requested, but waiting for request-queue space to
 * actually send the enable message to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the pending enable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the queue wait before stopping. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed: cancel the queue wait and go to iocdown. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2083 
/*
 * Port state: enable request sent to firmware; awaiting response or a
 * link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enabled but link not up yet. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* Capture link parameters before moving to linkup. */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2138 
/*
 * Port state: enabled, link down. Waits for linkup or a
 * disable/stop/failure event.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			/* FCoE mode: record FIP FCF discovery outcome. */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2217 
/*
 * Port state: enabled and link is up. Handles disable, linkdown, stop and
 * IOC failure; each path resets link info and notifies upper layers.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Disable implies offline: log both transitions. */
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* An unexpected linkdown is an error; a disabled one is not. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2299 
/*
 * Port state: disable requested, waiting for request-queue space to send
 * the disable message to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the pending disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while disable is pending: toggle when queue opens. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2344 
/*
 * Port state: a disable followed by an enable are both pending on
 * request-queue space; on resume, send disable then enable back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the deferred disable, then immediately re-enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already pending. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable: back to plain disable wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2391 
/*
 * Port state: disable request sent to firmware; awaiting response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware confirmed the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable while disable is outstanding. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2445 
/*
 * Port state: disabled (firmware acknowledged). Only enable, stop or IOC
 * failure change state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2493 
/*
 * Port state: stopped. Only a START event restarts the port; everything
 * else is ignored.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2516 
2517 /*
2518  * Port is enabled. IOC is down/failed.
2519  */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC recovered: re-issue the port enable. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2542 
2543 /*
2544  * Port is disabled. IOC is down/failed.
2545  */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC back but port was disabled: stay disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable requested while IOC is down: mark port enabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2568 
2569 /*
2570  * Link state is down
2571  */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Queue LINKUP callback; wait for its notification. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2588 
2589 /*
2590  * Link state is waiting for down notification
2591  */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Link came back before down was acknowledged: pend the up. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered: settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2611 
2612 /*
2613  * Link state is waiting for down notification and there is a pending up
2614  */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled by another down: back to down-wait. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down acknowledged: now deliver the pending LINKUP. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2635 
2636 /*
2637  * Link state is up
2638  */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Queue LINKDOWN callback; wait for its notification. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2655 
2656 /*
2657  * Link state is waiting for up notification
2658  */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Link dropped before up was acknowledged: pend the down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered: settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2678 
2679 /*
2680  * Link state is waiting for up notification and there is a pending down
2681  */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Another up arrived: track down-then-up while waiting. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up acknowledged: now deliver the pending LINKDOWN. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2702 
2703 /*
2704  * Link state is waiting for up notification and there are pending down and up
2705  */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Trailing up cancelled by another down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up acknowledged: deliver LINKDOWN, keep the up pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2726 
2727 static void
__bfa_cb_fcport_event(void * cbarg,bfa_boolean_t complete)2728 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2729 {
2730 	struct bfa_fcport_ln_s *ln = cbarg;
2731 
2732 	if (complete)
2733 		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2734 	else
2735 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2736 }
2737 
2738 /*
2739  * Send SCN notification to upper layers.
2740  * trunk - false if caller is fcport to ignore fcport event in trunked mode
2741  */
2742 static void
bfa_fcport_scn(struct bfa_fcport_s * fcport,enum bfa_port_linkstate event,bfa_boolean_t trunk)2743 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2744 	bfa_boolean_t trunk)
2745 {
2746 	if (fcport->cfg.trunked && !trunk)
2747 		return;
2748 
2749 	switch (event) {
2750 	case BFA_PORT_LINKUP:
2751 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2752 		break;
2753 	case BFA_PORT_LINKDOWN:
2754 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2755 		break;
2756 	default:
2757 		WARN_ON(1);
2758 	}
2759 }
2760 
2761 static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s * ln,enum bfa_port_linkstate event)2762 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2763 {
2764 	struct bfa_fcport_s *fcport = ln->fcport;
2765 
2766 	if (fcport->bfa->fcs) {
2767 		fcport->event_cbfn(fcport->event_cbarg, event);
2768 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2769 	} else {
2770 		ln->ln_event = event;
2771 		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2772 			__bfa_cb_fcport_event, ln);
2773 	}
2774 }
2775 
2776 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2777 							BFA_CACHELINE_SZ))
2778 
/*
 * Report the DMA memory needed by the fcport module (the per-port stats
 * area); no driver-managed (ndm) memory is required.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
2785 
/*
 * Request-queue resume callback: queue space is available again, so let
 * the port state machine re-issue its pending request.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2793 
2794 static void
bfa_fcport_mem_claim(struct bfa_fcport_s * fcport,struct bfa_meminfo_s * meminfo)2795 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2796 {
2797 	u8		*dm_kva;
2798 	u64	dm_pa;
2799 
2800 	dm_kva = bfa_meminfo_dma_virt(meminfo);
2801 	dm_pa  = bfa_meminfo_dma_phys(meminfo);
2802 
2803 	fcport->stats_kva = dm_kva;
2804 	fcport->stats_pa  = dm_pa;
2805 	fcport->stats	  = (union bfa_fcport_stats_u *) dm_kva;
2806 
2807 	dm_kva += FCPORT_STATS_DMA_SZ;
2808 	dm_pa  += FCPORT_STATS_DMA_SZ;
2809 
2810 	bfa_meminfo_dma_virt(meminfo) = dm_kva;
2811 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
2812 }
2813 
2814 /*
2815  * Memory initialization.
2816  */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	/* Start from a clean slate and wire up back-pointers. */
	memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	/* Initial states: port uninitialized, link-notify down. */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	/* Arm the request-queue wait element used when the reqq is full. */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2853 
/*
 * Module detach hook. Intentionally empty: the fcport module owns no
 * resources that need explicit teardown here.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2858 
2859 /*
2860  * Called when IOC is ready.
2861  */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	/* Kick the port state machine now that the IOC is ready. */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2867 
2868 /*
2869  * Called before IOC is stopped.
2870  */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	/* Stop the port state machine and bring down trunking state. */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2877 
2878 /*
2879  * Called when IOC failure is detected.
2880  */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* Propagate the IOC failure to the port SM and trunk state. */
	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2889 
/*
 * Copy link parameters from the firmware link-state event into the
 * fcport; called on a LINKUP transition.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): loop ALPA is zeroed here, not taken from the event. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2918 
/*
 * Clear cached link parameters when the link goes away.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
}
2925 
2926 /*
2927  * Send port enable message to firmware.
2928  */
2929 static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s * fcport)2930 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2931 {
2932 	struct bfi_fcport_enable_req_s *m;
2933 
2934 	/*
2935 	 * Increment message tag before queue check, so that responses to old
2936 	 * requests are discarded.
2937 	 */
2938 	fcport->msgtag++;
2939 
2940 	/*
2941 	 * check for room in queue to send request now
2942 	 */
2943 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2944 	if (!m) {
2945 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2946 							&fcport->reqq_wait);
2947 		return BFA_FALSE;
2948 	}
2949 
2950 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
2951 			bfa_lpuid(fcport->bfa));
2952 	m->nwwn = fcport->nwwn;
2953 	m->pwwn = fcport->pwwn;
2954 	m->port_cfg = fcport->cfg;
2955 	m->msgtag = fcport->msgtag;
2956 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
2957 	 m->use_flash_cfg = fcport->use_flash_cfg;
2958 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
2959 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
2960 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
2961 
2962 	/*
2963 	 * queue I/O message to firmware
2964 	 */
2965 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2966 	return BFA_TRUE;
2967 }
2968 
2969 /*
2970  * Send port disable message to firmware.
2971  */
static	bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* No queue space: park on the wait list; QRESUME retries. */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
3004 
/*
 * Copy the port/node WWNs from the IOC attributes into the fcport.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3014 
3015 static void
bfa_fcport_send_txcredit(void * port_cbarg)3016 bfa_fcport_send_txcredit(void *port_cbarg)
3017 {
3018 
3019 	struct bfa_fcport_s *fcport = port_cbarg;
3020 	struct bfi_fcport_set_svc_params_req_s *m;
3021 
3022 	/*
3023 	 * check for room in queue to send request now
3024 	 */
3025 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3026 	if (!m) {
3027 		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3028 		return;
3029 	}
3030 
3031 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3032 			bfa_lpuid(fcport->bfa));
3033 	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3034 
3035 	/*
3036 	 * queue I/O message to firmware
3037 	 */
3038 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3039 }
3040 
3041 static void
bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s * d,struct bfa_qos_stats_s * s)3042 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3043 	struct bfa_qos_stats_s *s)
3044 {
3045 	u32	*dip = (u32 *) d;
3046 	__be32	*sip = (__be32 *) s;
3047 	int		i;
3048 
3049 	/* Now swap the 32 bit fields */
3050 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3051 		dip[i] = be32_to_cpu(sip[i]);
3052 }
3053 
/*
 * Convert the FCoE stats block from firmware to host order. The stats are
 * treated as pairs of 32-bit words: on little-endian hosts the two words
 * of each pair are also exchanged (64-bit counter halves).
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* Swap word order within each pair as well as byte order. */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3073 
/*
 * Completion callback for a stats-get request: byte-swap the firmware
 * stats into the caller's buffer and invoke the caller's callback. On
 * cancellation, just clear the busy flag.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* Report seconds since the last stats reset. */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* Cancelled: release the stats request slot. */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3104 
/*
 * Timeout handler for a statistics fetch: cancel any pending request
 * queue wait and complete the operation with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		/* request never reached firmware - stop waiting for a slot */
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3121 
/*
 * Send a STATS_GET request to firmware. If the request queue is full,
 * park on the queue-wait list; this function is re-invoked from there
 * once a slot frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full - retry via the queue-resume callback */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3145 
/*
 * Completion callback for a statistics clear request.
 *
 * @complete == BFA_TRUE: record the new stats-reset timestamp and
 * invoke the caller's callback with the final status.
 * @complete == BFA_FALSE: the callback is being flushed/cancelled;
 * only reset the busy state.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3166 
/*
 * Timeout handler for a statistics clear: cancel any pending request
 * queue wait and complete the operation with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		/* request never reached firmware - stop waiting for a slot */
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3183 
/*
 * Send a STATS_CLEAR request to firmware. If the request queue is
 * full, park on the queue-wait list; this function is re-invoked from
 * there once a slot frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full - retry via the queue-resume callback */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3207 
/*
 * Handle trunk SCN event from firmware.
 *
 * Updates the cached trunk attributes (per-link state, speed, deskew),
 * logs which links came up, and notifies upper layers if the overall
 * trunk state changed or the trunk went offline.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of links reported UP (bit i = link i) */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		   scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl	  = tlink->fctl;
		lattr->speed	  = tlink->speed;
		/* deskew arrives big-endian from firmware */
		lattr->deskew	  = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed	 = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* log trunk membership: bit 0 = link 0, bit 1 = link 1 */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3286 
3287 static void
bfa_trunk_iocdisable(struct bfa_s * bfa)3288 bfa_trunk_iocdisable(struct bfa_s *bfa)
3289 {
3290 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3291 	int i = 0;
3292 
3293 	/*
3294 	 * In trunked mode, notify upper layers that link is down
3295 	 */
3296 	if (fcport->cfg.trunked) {
3297 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3298 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3299 
3300 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3301 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3302 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3303 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3304 			fcport->trunk.attr.link_attr[i].fctl =
3305 						BFA_TRUNK_LINK_FCTL_NORMAL;
3306 			fcport->trunk.attr.link_attr[i].link_state =
3307 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3308 			fcport->trunk.attr.link_attr[i].speed =
3309 						BFA_PORT_SPEED_UNKNOWN;
3310 			fcport->trunk.attr.link_attr[i].deskew = 0;
3311 		}
3312 	}
3313 }
3314 
/*
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* keep a previously configured max frame size if one exists */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* all three must be non-zero once the IOC attributes are valid */
	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3336 
/*
 * Firmware message handler.
 *
 * Dispatches FC port I2H (IOC-to-host) messages: enable/disable
 * responses, link state events, trunk SCNs, statistics responses and
 * enable/disable AENs are translated into port state machine events
 * and/or queued completion callbacks.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* stale responses (msgtag mismatch) are ignored */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			if (fcport->use_flash_cfg) {
				/* adopt the flash-saved port config once */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
	break;
	}
}
3435 
3436 /*
3437  * Registered callback for port events.
3438  */
3439 void
bfa_fcport_event_register(struct bfa_s * bfa,void (* cbfn)(void * cbarg,enum bfa_port_linkstate event),void * cbarg)3440 bfa_fcport_event_register(struct bfa_s *bfa,
3441 				void (*cbfn) (void *cbarg,
3442 				enum bfa_port_linkstate event),
3443 				void *cbarg)
3444 {
3445 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3446 
3447 	fcport->event_cbfn = cbfn;
3448 	fcport->event_cbarg = cbarg;
3449 }
3450 
3451 bfa_status_t
bfa_fcport_enable(struct bfa_s * bfa)3452 bfa_fcport_enable(struct bfa_s *bfa)
3453 {
3454 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3455 
3456 	if (bfa_ioc_is_disabled(&bfa->ioc))
3457 		return BFA_STATUS_IOC_DISABLED;
3458 
3459 	if (fcport->diag_busy)
3460 		return BFA_STATUS_DIAG_BUSY;
3461 
3462 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3463 	return BFA_STATUS_OK;
3464 }
3465 
3466 bfa_status_t
bfa_fcport_disable(struct bfa_s * bfa)3467 bfa_fcport_disable(struct bfa_s *bfa)
3468 {
3469 
3470 	if (bfa_ioc_is_disabled(&bfa->ioc))
3471 		return BFA_STATUS_IOC_DISABLED;
3472 
3473 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3474 	return BFA_STATUS_OK;
3475 }
3476 
3477 /*
3478  * Configure port speed.
3479  */
3480 bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s * bfa,enum bfa_port_speed speed)3481 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3482 {
3483 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3484 
3485 	bfa_trc(bfa, speed);
3486 
3487 	if (fcport->cfg.trunked == BFA_TRUE)
3488 		return BFA_STATUS_TRUNK_ENABLED;
3489 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3490 		bfa_trc(bfa, fcport->speed_sup);
3491 		return BFA_STATUS_UNSUPP_SPEED;
3492 	}
3493 
3494 	fcport->cfg.speed = speed;
3495 
3496 	return BFA_STATUS_OK;
3497 }
3498 
3499 /*
3500  * Get current speed.
3501  */
3502 enum bfa_port_speed
bfa_fcport_get_speed(struct bfa_s * bfa)3503 bfa_fcport_get_speed(struct bfa_s *bfa)
3504 {
3505 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3506 
3507 	return fcport->speed;
3508 }
3509 
3510 /*
3511  * Configure port topology.
3512  */
3513 bfa_status_t
bfa_fcport_cfg_topology(struct bfa_s * bfa,enum bfa_port_topology topology)3514 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3515 {
3516 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3517 
3518 	bfa_trc(bfa, topology);
3519 	bfa_trc(bfa, fcport->cfg.topology);
3520 
3521 	switch (topology) {
3522 	case BFA_PORT_TOPOLOGY_P2P:
3523 	case BFA_PORT_TOPOLOGY_LOOP:
3524 	case BFA_PORT_TOPOLOGY_AUTO:
3525 		break;
3526 
3527 	default:
3528 		return BFA_STATUS_EINVAL;
3529 	}
3530 
3531 	fcport->cfg.topology = topology;
3532 	return BFA_STATUS_OK;
3533 }
3534 
3535 /*
3536  * Get current topology.
3537  */
3538 enum bfa_port_topology
bfa_fcport_get_topology(struct bfa_s * bfa)3539 bfa_fcport_get_topology(struct bfa_s *bfa)
3540 {
3541 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3542 
3543 	return fcport->topology;
3544 }
3545 
3546 bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s * bfa,u8 alpa)3547 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3548 {
3549 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3550 
3551 	bfa_trc(bfa, alpa);
3552 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3553 	bfa_trc(bfa, fcport->cfg.hardalpa);
3554 
3555 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3556 	fcport->cfg.hardalpa = alpa;
3557 
3558 	return BFA_STATUS_OK;
3559 }
3560 
3561 bfa_status_t
bfa_fcport_clr_hardalpa(struct bfa_s * bfa)3562 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3563 {
3564 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3565 
3566 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3567 	bfa_trc(bfa, fcport->cfg.hardalpa);
3568 
3569 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3570 	return BFA_STATUS_OK;
3571 }
3572 
3573 bfa_boolean_t
bfa_fcport_get_hardalpa(struct bfa_s * bfa,u8 * alpa)3574 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3575 {
3576 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3577 
3578 	*alpa = fcport->cfg.hardalpa;
3579 	return fcport->cfg.cfg_hardalpa;
3580 }
3581 
3582 u8
bfa_fcport_get_myalpa(struct bfa_s * bfa)3583 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3584 {
3585 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3586 
3587 	return fcport->myalpa;
3588 }
3589 
3590 bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s * bfa,u16 maxfrsize)3591 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3592 {
3593 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3594 
3595 	bfa_trc(bfa, maxfrsize);
3596 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3597 
3598 	/* with in range */
3599 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3600 		return BFA_STATUS_INVLD_DFSZ;
3601 
3602 	/* power of 2, if not the max frame size of 2112 */
3603 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3604 		return BFA_STATUS_INVLD_DFSZ;
3605 
3606 	fcport->cfg.maxfrsize = maxfrsize;
3607 	return BFA_STATUS_OK;
3608 }
3609 
3610 u16
bfa_fcport_get_maxfrsize(struct bfa_s * bfa)3611 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3612 {
3613 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3614 
3615 	return fcport->cfg.maxfrsize;
3616 }
3617 
3618 u8
bfa_fcport_get_rx_bbcredit(struct bfa_s * bfa)3619 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3620 {
3621 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3622 
3623 	return fcport->cfg.rx_bbcredit;
3624 }
3625 
3626 void
bfa_fcport_set_tx_bbcredit(struct bfa_s * bfa,u16 tx_bbcredit)3627 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3628 {
3629 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3630 
3631 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3632 	bfa_fcport_send_txcredit(fcport);
3633 }
3634 
3635 /*
3636  * Get port attributes.
3637  */
3638 
3639 wwn_t
bfa_fcport_get_wwn(struct bfa_s * bfa,bfa_boolean_t node)3640 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3641 {
3642 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3643 	if (node)
3644 		return fcport->nwwn;
3645 	else
3646 		return fcport->pwwn;
3647 }
3648 
/*
 * Fill @attr with a snapshot of the port configuration and current
 * operational state (WWNs, speed, topology, trunking, beaconing and
 * the port state machine state).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	/* IOC-level problems override the port state machine state */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3692 
3693 #define BFA_FCPORT_STATS_TOV	1000
3694 
/*
 * Fetch port statistics (FCQoS or FCoE).
 *
 * Only one statistics operation may be outstanding at a time; returns
 * BFA_STATUS_DEVBUSY otherwise. Completion (or timeout after
 * BFA_FCPORT_STATS_TOV) is reported through @cbfn(@cbarg, status),
 * with the converted stats placed in the caller-owned @stats buffer.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_ret   = stats;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3720 
/*
 * Reset port statistics (FCQoS or FCoE).
 *
 * Only one statistics operation may be outstanding at a time; returns
 * BFA_STATUS_DEVBUSY otherwise. Completion (or timeout after
 * BFA_FCPORT_STATS_TOV) is reported through @cbfn(@cbarg, status).
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy  = BFA_TRUE;
	fcport->stats_cbfn  = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3744 
3745 
3746 /*
3747  * Fetch port attributes.
3748  */
3749 bfa_boolean_t
bfa_fcport_is_disabled(struct bfa_s * bfa)3750 bfa_fcport_is_disabled(struct bfa_s *bfa)
3751 {
3752 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3753 
3754 	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3755 		BFA_PORT_ST_DISABLED;
3756 
3757 }
3758 
3759 bfa_boolean_t
bfa_fcport_is_ratelim(struct bfa_s * bfa)3760 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3761 {
3762 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3763 
3764 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3765 
3766 }
3767 
3768 /*
3769  * Get default minimum ratelim speed
3770  */
3771 enum bfa_port_speed
bfa_fcport_get_ratelim_speed(struct bfa_s * bfa)3772 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3773 {
3774 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3775 
3776 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
3777 	return fcport->cfg.trl_def_speed;
3778 
3779 }
3780 
3781 bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s * bfa)3782 bfa_fcport_is_linkup(struct bfa_s *bfa)
3783 {
3784 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3785 
3786 	return	(!fcport->cfg.trunked &&
3787 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3788 		(fcport->cfg.trunked &&
3789 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3790 }
3791 
3792 bfa_boolean_t
bfa_fcport_is_qos_enabled(struct bfa_s * bfa)3793 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3794 {
3795 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3796 
3797 	return fcport->cfg.qos_enabled;
3798 }
3799 
3800 /*
3801  * Rport State machine functions
3802  */
3803 /*
3804  * Beginning state, only online event expected.
3805  */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event before CREATE is a driver bug */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3823 
/*
 * Rport is created but not yet online - waiting for an ONLINE event
 * before the firmware rport is created.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* next state depends on whether the request could be queued */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* nothing sent to firmware yet - free immediately */
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3855 
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* create is in flight - defer the delete until it answers */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* likewise, defer the offline until the create response */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3892 
/*
 * Request queue is full, awaiting queue resume to send create request.
 * Since nothing has reached firmware yet, delete/offline can be
 * honored locally after cancelling the queue wait.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* a request slot is available now - send the create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3933 
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* next state depends on whether the request could be queued */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* firmware reported a QoS attribute change for this rport */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify upper layers separately for flow-id and priority */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4001 
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down - no response will come; report offline now */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4034 
/*
 * Request queue was full when the firmware delete was attempted -
 * waiting for queue resume to (re)send the delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* a request slot is available now - send the delete */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down - cancel the queue wait and report offline */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4064 
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* no firmware rport exists - free immediately */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		/* next state depends on whether the request could be queued */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4099 
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down - no response will come; free anyway */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4126 
4127 static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s * rp,enum bfa_rport_event event)4128 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4129 {
4130 	bfa_trc(rp->bfa, rp->rport_tag);
4131 	bfa_trc(rp->bfa, event);
4132 
4133 	switch (event) {
4134 	case BFA_RPORT_SM_QRESUME:
4135 		bfa_stats(rp, sm_del_fwrsp);
4136 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4137 		bfa_rport_send_fwdelete(rp);
4138 		break;
4139 
4140 	case BFA_RPORT_SM_HWFAIL:
4141 		bfa_stats(rp, sm_del_hwf);
4142 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4143 		bfa_reqq_wcancel(&rp->reqq_wait);
4144 		bfa_rport_free(rp);
4145 		break;
4146 
4147 	default:
4148 		bfa_sm_fault(rp->bfa, event);
4149 	}
4150 }
4151 
4152 /*
4153  * Waiting for rport create response from firmware. A delete is pending.
4154  */
4155 static void
bfa_rport_sm_delete_pending(struct bfa_rport_s * rp,enum bfa_rport_event event)4156 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4157 				enum bfa_rport_event event)
4158 {
4159 	bfa_trc(rp->bfa, rp->rport_tag);
4160 	bfa_trc(rp->bfa, event);
4161 
4162 	switch (event) {
4163 	case BFA_RPORT_SM_FWRSP:
4164 		bfa_stats(rp, sm_delp_fwrsp);
4165 		if (bfa_rport_send_fwdelete(rp))
4166 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4167 		else
4168 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4169 		break;
4170 
4171 	case BFA_RPORT_SM_HWFAIL:
4172 		bfa_stats(rp, sm_delp_hwf);
4173 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4174 		bfa_rport_free(rp);
4175 		break;
4176 
4177 	default:
4178 		bfa_stats(rp, sm_delp_unexp);
4179 		bfa_sm_fault(rp->bfa, event);
4180 	}
4181 }
4182 
4183 /*
4184  * Waiting for rport create response from firmware. Rport offline is pending.
4185  */
4186 static void
bfa_rport_sm_offline_pending(struct bfa_rport_s * rp,enum bfa_rport_event event)4187 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4188 				 enum bfa_rport_event event)
4189 {
4190 	bfa_trc(rp->bfa, rp->rport_tag);
4191 	bfa_trc(rp->bfa, event);
4192 
4193 	switch (event) {
4194 	case BFA_RPORT_SM_FWRSP:
4195 		bfa_stats(rp, sm_offp_fwrsp);
4196 		if (bfa_rport_send_fwdelete(rp))
4197 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4198 		else
4199 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4200 		break;
4201 
4202 	case BFA_RPORT_SM_DELETE:
4203 		bfa_stats(rp, sm_offp_del);
4204 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4205 		break;
4206 
4207 	case BFA_RPORT_SM_HWFAIL:
4208 		bfa_stats(rp, sm_offp_hwf);
4209 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4210 		break;
4211 
4212 	default:
4213 		bfa_stats(rp, sm_offp_unexp);
4214 		bfa_sm_fault(rp->bfa, event);
4215 	}
4216 }
4217 
4218 /*
4219  * IOC h/w failed.
4220  */
4221 static void
bfa_rport_sm_iocdisable(struct bfa_rport_s * rp,enum bfa_rport_event event)4222 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4223 {
4224 	bfa_trc(rp->bfa, rp->rport_tag);
4225 	bfa_trc(rp->bfa, event);
4226 
4227 	switch (event) {
4228 	case BFA_RPORT_SM_OFFLINE:
4229 		bfa_stats(rp, sm_iocd_off);
4230 		bfa_rport_offline_cb(rp);
4231 		break;
4232 
4233 	case BFA_RPORT_SM_DELETE:
4234 		bfa_stats(rp, sm_iocd_del);
4235 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4236 		bfa_rport_free(rp);
4237 		break;
4238 
4239 	case BFA_RPORT_SM_ONLINE:
4240 		bfa_stats(rp, sm_iocd_on);
4241 		if (bfa_rport_send_fwcreate(rp))
4242 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4243 		else
4244 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4245 		break;
4246 
4247 	case BFA_RPORT_SM_HWFAIL:
4248 		break;
4249 
4250 	default:
4251 		bfa_stats(rp, sm_iocd_unexp);
4252 		bfa_sm_fault(rp->bfa, event);
4253 	}
4254 }
4255 
4256 
4257 
4258 /*
4259  *  bfa_rport_private BFA rport private functions
4260  */
4261 
4262 static void
__bfa_cb_rport_online(void * cbarg,bfa_boolean_t complete)4263 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4264 {
4265 	struct bfa_rport_s *rp = cbarg;
4266 
4267 	if (complete)
4268 		bfa_cb_rport_online(rp->rport_drv);
4269 }
4270 
4271 static void
__bfa_cb_rport_offline(void * cbarg,bfa_boolean_t complete)4272 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4273 {
4274 	struct bfa_rport_s *rp = cbarg;
4275 
4276 	if (complete)
4277 		bfa_cb_rport_offline(rp->rport_drv);
4278 }
4279 
4280 static void
bfa_rport_qresume(void * cbarg)4281 bfa_rport_qresume(void *cbarg)
4282 {
4283 	struct bfa_rport_s	*rp = cbarg;
4284 
4285 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4286 }
4287 
4288 static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s * cfg,u32 * km_len,u32 * dm_len)4289 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4290 		u32 *dm_len)
4291 {
4292 	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4293 		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4294 
4295 	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
4296 }
4297 
/*
 * Module attach: carve the rport array out of the kernel meminfo block,
 * initialize every rport to the uninit state and build the free list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	/* Claim the array at the current kernel-VA cursor. */
	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* Count must be a non-zero power of two (tag masking assumption). */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is reserved/unused, so it is never placed
		 * on the free list.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory: advance the kva cursor past the rport array
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4336 
static void
bfa_rport_detach(struct bfa_s *bfa)
{
	/* Nothing to undo: attach only carves memory out of meminfo. */
}
4341 
static void
bfa_rport_start(struct bfa_s *bfa)
{
	/* No per-module start processing required. */
}
4346 
static void
bfa_rport_stop(struct bfa_s *bfa)
{
	/* No per-module stop processing required. */
}
4351 
4352 static void
bfa_rport_iocdisable(struct bfa_s * bfa)4353 bfa_rport_iocdisable(struct bfa_s *bfa)
4354 {
4355 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4356 	struct bfa_rport_s *rport;
4357 	struct list_head *qe, *qen;
4358 
4359 	list_for_each_safe(qe, qen, &mod->rp_active_q) {
4360 		rport = (struct bfa_rport_s *) qe;
4361 		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4362 	}
4363 }
4364 
4365 static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s * mod)4366 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4367 {
4368 	struct bfa_rport_s *rport;
4369 
4370 	bfa_q_deq(&mod->rp_free_q, &rport);
4371 	if (rport)
4372 		list_add_tail(&rport->qe, &mod->rp_active_q);
4373 
4374 	return rport;
4375 }
4376 
4377 static void
bfa_rport_free(struct bfa_rport_s * rport)4378 bfa_rport_free(struct bfa_rport_s *rport)
4379 {
4380 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4381 
4382 	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4383 	list_del(&rport->qe);
4384 	list_add_tail(&rport->qe, &mod->rp_free_q);
4385 }
4386 
/*
 * Send an rport CREATE request to firmware.
 * Returns BFA_FALSE (and arms a queue-resume wait) if the request
 * queue is full; BFA_TRUE once the message has been queued.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		/* Queue full: QRESUME will retry via bfa_rport_qresume(). */
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* Fill the create request from the cached login parameters. */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4419 
4420 static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s * rp)4421 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4422 {
4423 	struct bfi_rport_delete_req_s *m;
4424 
4425 	/*
4426 	 * check for room in queue to send request now
4427 	 */
4428 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4429 	if (!m) {
4430 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4431 		return BFA_FALSE;
4432 	}
4433 
4434 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4435 			bfa_lpuid(rp->bfa));
4436 	m->fw_handle = rp->fw_handle;
4437 
4438 	/*
4439 	 * queue I/O message to firmware
4440 	 */
4441 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4442 	return BFA_TRUE;
4443 }
4444 
4445 static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s * rp)4446 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4447 {
4448 	struct bfa_rport_speed_req_s *m;
4449 
4450 	/*
4451 	 * check for room in queue to send request now
4452 	 */
4453 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4454 	if (!m) {
4455 		bfa_trc(rp->bfa, rp->rport_info.speed);
4456 		return BFA_FALSE;
4457 	}
4458 
4459 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4460 			bfa_lpuid(rp->bfa));
4461 	m->fw_handle = rp->fw_handle;
4462 	m->speed = (u8)rp->rport_info.speed;
4463 
4464 	/*
4465 	 * queue I/O message to firmware
4466 	 */
4467 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
4468 	return BFA_TRUE;
4469 }
4470 
4471 
4472 
4473 /*
4474  *  bfa_rport_public
4475  */
4476 
4477 /*
4478  * Rport interrupt processing.
4479  */
4480 void
bfa_rport_isr(struct bfa_s * bfa,struct bfi_msg_s * m)4481 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4482 {
4483 	union bfi_rport_i2h_msg_u msg;
4484 	struct bfa_rport_s *rp;
4485 
4486 	bfa_trc(bfa, m->mhdr.msg_id);
4487 
4488 	msg.msg = m;
4489 
4490 	switch (m->mhdr.msg_id) {
4491 	case BFI_RPORT_I2H_CREATE_RSP:
4492 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4493 		rp->fw_handle = msg.create_rsp->fw_handle;
4494 		rp->qos_attr = msg.create_rsp->qos_attr;
4495 		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4496 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4497 		break;
4498 
4499 	case BFI_RPORT_I2H_DELETE_RSP:
4500 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4501 		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4502 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4503 		break;
4504 
4505 	case BFI_RPORT_I2H_QOS_SCN:
4506 		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4507 		rp->event_arg.fw_msg = msg.qos_scn_evt;
4508 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4509 		break;
4510 
4511 	default:
4512 		bfa_trc(bfa, m->mhdr.msg_id);
4513 		WARN_ON(1);
4514 	}
4515 }
4516 
4517 
4518 
4519 /*
4520  *  bfa_rport_api
4521  */
4522 
4523 struct bfa_rport_s *
bfa_rport_create(struct bfa_s * bfa,void * rport_drv)4524 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4525 {
4526 	struct bfa_rport_s *rp;
4527 
4528 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4529 
4530 	if (rp == NULL)
4531 		return NULL;
4532 
4533 	rp->bfa = bfa;
4534 	rp->rport_drv = rport_drv;
4535 	memset(&rp->stats, 0, sizeof(rp->stats));
4536 
4537 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4538 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4539 
4540 	return rp;
4541 }
4542 
/*
 * Bring an rport online with the given login parameters.
 * A zero max_frmsz is flagged (WARN) but then repaired below, so the
 * rport still comes up with a usable PDU size.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	/* Snapshot the login parameters and drive the SM online. */
	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4560 
4561 void
bfa_rport_speed(struct bfa_rport_s * rport,enum bfa_port_speed speed)4562 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4563 {
4564 	WARN_ON(speed == 0);
4565 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
4566 
4567 	rport->rport_info.speed = speed;
4568 	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4569 }
4570 
4571 
4572 /*
4573  * SGPG related functions
4574  */
4575 
4576 /*
4577  * Compute and return memory needed by FCP(im) module.
4578  */
4579 static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s * cfg,u32 * km_len,u32 * dm_len)4580 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4581 		u32 *dm_len)
4582 {
4583 	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4584 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4585 
4586 	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4587 	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4588 }
4589 
4590 
/*
 * Module attach: carve aligned host and DMA arrays of SG pages out of
 * meminfo, pair each host-side bfa_sgpg_s with its DMA bfi_sgpg_s, and
 * put all pages on the free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* Union to view one 64-bit PA as the firmware bfi_addr_u format. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	/*
	 * Round the DMA base up to SGPG alignment; apply the same offset
	 * to the kva and dma-virt cursors so all three stay in lockstep.
	 */
	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* After round-up the PA must be SGPG-size aligned. */
	WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		/* Link host page to its DMA page (PA in LE sg format). */
		hsgpg->sgpg = sgpg;
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* Advance all meminfo cursors past the consumed arrays. */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4645 
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
	/* Nothing to undo: attach only carves memory out of meminfo. */
}
4650 
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
	/* No per-module start processing required. */
}
4655 
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
	/* No per-module stop processing required. */
}
4660 
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
	/* SGPG state is host-only; nothing to clean up on IOC failure. */
}
4665 
4666 bfa_status_t
bfa_sgpg_malloc(struct bfa_s * bfa,struct list_head * sgpg_q,int nsgpgs)4667 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4668 {
4669 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4670 	struct bfa_sgpg_s *hsgpg;
4671 	int i;
4672 
4673 	if (mod->free_sgpgs < nsgpgs)
4674 		return BFA_STATUS_ENOMEM;
4675 
4676 	for (i = 0; i < nsgpgs; i++) {
4677 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
4678 		WARN_ON(!hsgpg);
4679 		list_add_tail(&hsgpg->qe, sgpg_q);
4680 	}
4681 
4682 	mod->free_sgpgs -= nsgpgs;
4683 	return BFA_STATUS_OK;
4684 }
4685 
/*
 * Return nsgpg SG pages from sgpg_q to the free pool, then hand freed
 * pages to any queued waiters (FIFO), invoking a waiter's callback once
 * its full request has been satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	/* Move the caller's pages back onto the module free list. */
	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		/* Give the head waiter what it needs, capped by free pages. */
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* Fully satisfied: dequeue and notify the waiter. */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4717 
/*
 * Queue a wait for nsgpg SG pages. Any pages currently free are handed
 * to this waiter immediately; the remainder is delivered by
 * bfa_sgpg_mfree() as pages are returned.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* Callers wait only when the request exceeds what is free. */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4743 
4744 void
bfa_sgpg_wcancel(struct bfa_s * bfa,struct bfa_sgpg_wqe_s * wqe)4745 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4746 {
4747 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4748 
4749 	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4750 	list_del(&wqe->qe);
4751 
4752 	if (wqe->nsgpg_total != wqe->nsgpg)
4753 		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4754 				   wqe->nsgpg_total - wqe->nsgpg);
4755 }
4756 
4757 void
bfa_sgpg_winit(struct bfa_sgpg_wqe_s * wqe,void (* cbfn)(void * cbarg),void * cbarg)4758 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4759 		   void *cbarg)
4760 {
4761 	INIT_LIST_HEAD(&wqe->sgpg_q);
4762 	wqe->cbfn = cbfn;
4763 	wqe->cbarg = cbarg;
4764 }
4765 
4766 /*
4767  *  UF related functions
4768  */
4769 /*
4770  *****************************************************************************
4771  * Internal functions
4772  *****************************************************************************
4773  */
4774 static void
__bfa_cb_uf_recv(void * cbarg,bfa_boolean_t complete)4775 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4776 {
4777 	struct bfa_uf_s   *uf = cbarg;
4778 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4779 
4780 	if (complete)
4781 		ufm->ufrecv(ufm->cbarg, uf);
4782 }
4783 
4784 static void
claim_uf_pbs(struct bfa_uf_mod_s * ufm,struct bfa_meminfo_s * mi)4785 claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4786 {
4787 	u32 uf_pb_tot_sz;
4788 
4789 	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
4790 	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
4791 	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
4792 							BFA_DMA_ALIGN_SZ);
4793 
4794 	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
4795 	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
4796 
4797 	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
4798 }
4799 
4800 static void
claim_uf_post_msgs(struct bfa_uf_mod_s * ufm,struct bfa_meminfo_s * mi)4801 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4802 {
4803 	struct bfi_uf_buf_post_s *uf_bp_msg;
4804 	struct bfi_sge_s      *sge;
4805 	union bfi_addr_u      sga_zero = { {0} };
4806 	u16 i;
4807 	u16 buf_len;
4808 
4809 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
4810 	uf_bp_msg = ufm->uf_buf_posts;
4811 
4812 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4813 	     i++, uf_bp_msg++) {
4814 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
4815 
4816 		uf_bp_msg->buf_tag = i;
4817 		buf_len = sizeof(struct bfa_uf_buf_s);
4818 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
4819 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4820 			    bfa_lpuid(ufm->bfa));
4821 
4822 		sge = uf_bp_msg->sge;
4823 		sge[0].sg_len = buf_len;
4824 		sge[0].flags = BFI_SGE_DATA_LAST;
4825 		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
4826 		bfa_sge_to_be(sge);
4827 
4828 		sge[1].sg_len = buf_len;
4829 		sge[1].flags = BFI_SGE_PGDLEN;
4830 		sge[1].sga = sga_zero;
4831 		bfa_sge_to_be(&sge[1]);
4832 	}
4833 
4834 	/*
4835 	 * advance pointer beyond consumed memory
4836 	 */
4837 	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
4838 }
4839 
4840 static void
claim_ufs(struct bfa_uf_mod_s * ufm,struct bfa_meminfo_s * mi)4841 claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4842 {
4843 	u16 i;
4844 	struct bfa_uf_s   *uf;
4845 
4846 	/*
4847 	 * Claim block of memory for UF list
4848 	 */
4849 	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
4850 
4851 	/*
4852 	 * Initialize UFs and queue it in UF free queue
4853 	 */
4854 	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
4855 		memset(uf, 0, sizeof(struct bfa_uf_s));
4856 		uf->bfa = ufm->bfa;
4857 		uf->uf_tag = i;
4858 		uf->pb_len = sizeof(struct bfa_uf_buf_s);
4859 		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
4860 		uf->buf_pa = ufm_pbs_pa(ufm, i);
4861 		list_add_tail(&uf->qe, &ufm->uf_free_q);
4862 	}
4863 
4864 	/*
4865 	 * advance memory pointer
4866 	 */
4867 	bfa_meminfo_kva(mi) = (u8 *) uf;
4868 }
4869 
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	/*
	 * Order matters: each claim consumes from the shared meminfo
	 * cursors, and claim_uf_post_msgs() reads the PAs set up by
	 * claim_uf_pbs().
	 */
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4877 
4878 static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s * cfg,u32 * ndm_len,u32 * dm_len)4879 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4880 {
4881 	u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4882 
4883 	/*
4884 	 * dma-able memory for UF posted bufs
4885 	 */
4886 	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4887 							BFA_DMA_ALIGN_SZ);
4888 
4889 	/*
4890 	 * kernel Virtual memory for UFs and UF buf post msg copies
4891 	 */
4892 	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4893 	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4894 }
4895 
4896 static void
bfa_uf_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_meminfo_s * meminfo,struct bfa_pcidev_s * pcidev)4897 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4898 		  struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
4899 {
4900 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4901 
4902 	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
4903 	ufm->bfa = bfa;
4904 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
4905 	INIT_LIST_HEAD(&ufm->uf_free_q);
4906 	INIT_LIST_HEAD(&ufm->uf_posted_q);
4907 
4908 	uf_mem_claim(ufm, meminfo);
4909 }
4910 
static void
bfa_uf_detach(struct bfa_s *bfa)
{
	/* Nothing to undo: attach only carves memory out of meminfo. */
}
4915 
4916 static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s * uf_mod)4917 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4918 {
4919 	struct bfa_uf_s   *uf;
4920 
4921 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
4922 	return uf;
4923 }
4924 
/* Return a UF descriptor to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
4930 
4931 static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s * ufm,struct bfa_uf_s * uf)4932 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
4933 {
4934 	struct bfi_uf_buf_post_s *uf_post_msg;
4935 
4936 	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
4937 	if (!uf_post_msg)
4938 		return BFA_STATUS_FAILED;
4939 
4940 	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
4941 		      sizeof(struct bfi_uf_buf_post_s));
4942 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
4943 
4944 	bfa_trc(ufm->bfa, uf->uf_tag);
4945 
4946 	list_add_tail(&uf->qe, &ufm->uf_posted_q);
4947 	return BFA_STATUS_OK;
4948 }
4949 
4950 static void
bfa_uf_post_all(struct bfa_uf_mod_s * uf_mod)4951 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4952 {
4953 	struct bfa_uf_s   *uf;
4954 
4955 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4956 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
4957 			break;
4958 	}
4959 }
4960 
4961 static void
uf_recv(struct bfa_s * bfa,struct bfi_uf_frm_rcvd_s * m)4962 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4963 {
4964 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4965 	u16 uf_tag = m->buf_tag;
4966 	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4967 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4968 	u8 *buf = &uf_buf->d[0];
4969 	struct fchs_s *fchs;
4970 
4971 	m->frm_len = be16_to_cpu(m->frm_len);
4972 	m->xfr_len = be16_to_cpu(m->xfr_len);
4973 
4974 	fchs = (struct fchs_s *)uf_buf;
4975 
4976 	list_del(&uf->qe);	/* dequeue from posted queue */
4977 
4978 	uf->data_ptr = buf;
4979 	uf->data_len = m->xfr_len;
4980 
4981 	WARN_ON(uf->data_len < sizeof(struct fchs_s));
4982 
4983 	if (uf->data_len == sizeof(struct fchs_s)) {
4984 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
4985 			       uf->data_len, (struct fchs_s *)buf);
4986 	} else {
4987 		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
4988 		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
4989 				      BFA_PL_EID_RX, uf->data_len,
4990 				      (struct fchs_s *)buf, pld_w0);
4991 	}
4992 
4993 	if (bfa->fcs)
4994 		__bfa_cb_uf_recv(uf, BFA_TRUE);
4995 	else
4996 		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
4997 }
4998 
static void
bfa_uf_stop(struct bfa_s *bfa)
{
	/* No per-module stop processing required. */
}
5003 
5004 static void
bfa_uf_iocdisable(struct bfa_s * bfa)5005 bfa_uf_iocdisable(struct bfa_s *bfa)
5006 {
5007 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5008 	struct bfa_uf_s *uf;
5009 	struct list_head *qe, *qen;
5010 
5011 	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5012 		uf = (struct bfa_uf_s *) qe;
5013 		list_del(&uf->qe);
5014 		bfa_uf_put(ufm, uf);
5015 	}
5016 }
5017 
static void
bfa_uf_start(struct bfa_s *bfa)
{
	/* Hand all free UF buffers to firmware once the module starts. */
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5023 
5024 /*
 * Register handler for all unsolicited receive frames.
5026  *
5027  * @param[in]	bfa		BFA instance
5028  * @param[in]	ufrecv	receive handler function
5029  * @param[in]	cbarg	receive handler arg
5030  */
5031 void
bfa_uf_recv_register(struct bfa_s * bfa,bfa_cb_uf_recv_t ufrecv,void * cbarg)5032 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5033 {
5034 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5035 
5036 	ufm->ufrecv = ufrecv;
5037 	ufm->cbarg = cbarg;
5038 }
5039 
5040 /*
5041  *	Free an unsolicited frame back to BFA.
5042  *
5043  * @param[in]		uf		unsolicited frame to be freed
5044  *
5045  * @return None
5046  */
5047 void
bfa_uf_free(struct bfa_uf_s * uf)5048 bfa_uf_free(struct bfa_uf_s *uf)
5049 {
5050 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5051 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5052 }
5053 
5054 
5055 
5056 /*
5057  *  uf_pub BFA uf module public functions
5058  */
5059 void
bfa_uf_isr(struct bfa_s * bfa,struct bfi_msg_s * msg)5060 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5061 {
5062 	bfa_trc(bfa, msg->mhdr.msg_id);
5063 
5064 	switch (msg->mhdr.msg_id) {
5065 	case BFI_UF_I2H_FRM_RCVD:
5066 		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5067 		break;
5068 
5069 	default:
5070 		bfa_trc(bfa, msg->mhdr.msg_id);
5071 		WARN_ON(1);
5072 	}
5073 }
5074 
5075 
5076