1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfad_drv.h"
19 #include "bfa_modules.h"
20 
21 BFA_TRC_FILE(HAL, FCPIM);
22 BFA_MODULE(fcpim);
23 
24 /*
25  *  BFA ITNIM Related definitions
26  */
27 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
28 
29 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
30 	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
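/*
 * Note (sketch of the intent): the lookup above indexes itnim_arr with
 * ((_tag) & (num_itnims - 1)). That mask equals (_tag) % num_itnims only
 * when num_itnims is a power of two, so the macro presumably relies on the
 * firmware configuration sizing num_rports accordingly.
 */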
31 
32 #define bfa_fcpim_additn(__itnim)					\
33 	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
34 #define bfa_fcpim_delitn(__itnim)	do {				\
35 	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
36 	bfa_itnim_update_del_itn_stats(__itnim);      \
37 	list_del(&(__itnim)->qe);      \
38 	WARN_ON(!list_empty(&(__itnim)->io_q));				\
39 	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
40 	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
41 } while (0)
42 
43 #define bfa_itnim_online_cb(__itnim) do {				\
44 	if ((__itnim)->bfa->fcs)					\
45 		bfa_cb_itnim_online((__itnim)->ditn);      \
46 	else {								\
47 		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
48 		__bfa_cb_itnim_online, (__itnim));      \
49 	}								\
50 } while (0)
51 
52 #define bfa_itnim_offline_cb(__itnim) do {				\
53 	if ((__itnim)->bfa->fcs)					\
54 		bfa_cb_itnim_offline((__itnim)->ditn);      \
55 	else {								\
56 		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
57 		__bfa_cb_itnim_offline, (__itnim));      \
58 	}								\
59 } while (0)
60 
61 #define bfa_itnim_sler_cb(__itnim) do {					\
62 	if ((__itnim)->bfa->fcs)					\
63 		bfa_cb_itnim_sler((__itnim)->ditn);      \
64 	else {								\
65 		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
66 		__bfa_cb_itnim_sler, (__itnim));      \
67 	}								\
68 } while (0)
69 
70 /*
71  *  itnim state machine event
72  */
73 enum bfa_itnim_event {
74 	BFA_ITNIM_SM_CREATE = 1,	/*  itnim is created */
75 	BFA_ITNIM_SM_ONLINE = 2,	/*  itnim is online */
76 	BFA_ITNIM_SM_OFFLINE = 3,	/*  itnim is offline */
77 	BFA_ITNIM_SM_FWRSP = 4,		/*  firmware response */
78 	BFA_ITNIM_SM_DELETE = 5,	/*  deleting an existing itnim */
79 	BFA_ITNIM_SM_CLEANUP = 6,	/*  IO cleanup completion */
80 	BFA_ITNIM_SM_SLER = 7,		/*  second level error recovery */
81 	BFA_ITNIM_SM_HWFAIL = 8,	/*  IOC h/w failure event */
82 	BFA_ITNIM_SM_QRESUME = 9,	/*  queue space available */
83 };
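/*
 * These events drive the bfa_itnim_sm_* handlers below: QRESUME is raised
 * by bfa_itnim_qresume() once request-queue space frees up, and HWFAIL by
 * bfa_itnim_iocdisable() on an IOC hardware failure.
 */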
84 
85 /*
86  *  BFA IOIM related definitions
87  */
88 #define bfa_ioim_move_to_comp_q(__ioim) do {				\
89 	list_del(&(__ioim)->qe);					\
90 	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
91 } while (0)
92 
93 
94 #define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
95 	if ((__fcpim)->profile_comp)					\
96 		(__fcpim)->profile_comp(__ioim);			\
97 } while (0)
98 
99 #define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
100 	if ((__fcpim)->profile_start)					\
101 		(__fcpim)->profile_start(__ioim);			\
102 } while (0)
103 
104 /*
105  * IO state machine events
106  */
107 enum bfa_ioim_event {
108 	BFA_IOIM_SM_START	= 1,	/*  io start request from host */
109 	BFA_IOIM_SM_COMP_GOOD	= 2,	/*  io good comp, resource free */
110 	BFA_IOIM_SM_COMP	= 3,	/*  io comp, resource is free */
111 	BFA_IOIM_SM_COMP_UTAG	= 4,	/*  io comp, resource is free */
112 	BFA_IOIM_SM_DONE	= 5,	/*  io comp, resource not free */
113 	BFA_IOIM_SM_FREE	= 6,	/*  io resource is freed */
114 	BFA_IOIM_SM_ABORT	= 7,	/*  abort request from scsi stack */
115 	BFA_IOIM_SM_ABORT_COMP	= 8,	/*  abort from f/w */
116 	BFA_IOIM_SM_ABORT_DONE	= 9,	/*  abort completion from f/w */
117 	BFA_IOIM_SM_QRESUME	= 10,	/*  CQ space available to queue IO */
118 	BFA_IOIM_SM_SGALLOCED	= 11,	/*  SG page allocation successful */
119 	BFA_IOIM_SM_SQRETRY	= 12,	/*  sequence recovery retry */
120 	BFA_IOIM_SM_HCB		= 13,	/*  bfa callback complete */
121 	BFA_IOIM_SM_CLEANUP	= 14,	/*  IO cleanup from itnim */
122 	BFA_IOIM_SM_TMSTART	= 15,	/*  IO cleanup from tskim */
123 	BFA_IOIM_SM_TMDONE	= 16,	/*  IO cleanup from tskim */
124 	BFA_IOIM_SM_HWFAIL	= 17,	/*  IOC h/w failure event */
125 	BFA_IOIM_SM_IOTOV	= 18,	/*  ITN offline TOV */
126 };
127 
128 
129 /*
130  *  BFA TSKIM related definitions
131  */
132 
133 /*
134  * task management completion handling
135  */
136 #define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
137 	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
138 	bfa_tskim_notify_comp(__tskim);      \
139 } while (0)
140 
141 #define bfa_tskim_notify_comp(__tskim) do {				\
142 	if ((__tskim)->notify)						\
143 		bfa_itnim_tskdone((__tskim)->itnim);      \
144 } while (0)
145 
146 
147 enum bfa_tskim_event {
148 	BFA_TSKIM_SM_START	= 1,	/*  TM command start		*/
149 	BFA_TSKIM_SM_DONE	= 2,	/*  TM completion		*/
150 	BFA_TSKIM_SM_QRESUME	= 3,	/*  resume after qfull		*/
151 	BFA_TSKIM_SM_HWFAIL	= 5,	/*  IOC h/w failure event	*/
152 	BFA_TSKIM_SM_HCB	= 6,	/*  BFA callback completion	*/
153 	BFA_TSKIM_SM_IOS_DONE	= 7,	/*  IO and sub TM completions	*/
154 	BFA_TSKIM_SM_CLEANUP	= 8,	/*  TM cleanup on ITN offline	*/
155 	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/*  TM abort completion	*/
156 };
157 
158 /*
159  * forward declaration for BFA ITNIM functions
160  */
161 static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
162 static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
163 static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
164 static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
165 static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
166 static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
167 static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
168 static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
169 static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
170 static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
171 static void     bfa_itnim_iotov(void *itnim_arg);
172 static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
173 static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
174 static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
175 
176 /*
177  * forward declaration of ITNIM state machine
178  */
179 static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
180 					enum bfa_itnim_event event);
181 static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
182 					enum bfa_itnim_event event);
183 static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
184 					enum bfa_itnim_event event);
185 static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
186 					enum bfa_itnim_event event);
187 static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
188 					enum bfa_itnim_event event);
189 static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
190 					enum bfa_itnim_event event);
191 static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
192 					enum bfa_itnim_event event);
193 static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
194 					enum bfa_itnim_event event);
195 static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
196 					enum bfa_itnim_event event);
197 static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
198 					enum bfa_itnim_event event);
199 static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
200 					enum bfa_itnim_event event);
201 static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
202 					enum bfa_itnim_event event);
203 static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
204 					enum bfa_itnim_event event);
205 static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
206 					enum bfa_itnim_event event);
207 static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
208 					enum bfa_itnim_event event);
209 
210 /*
211  * forward declaration for BFA IOIM functions
212  */
213 static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
214 static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
215 static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
216 static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
217 static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
218 static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
219 static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
220 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
221 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
222 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
223 
224 /*
225  * forward declaration of BFA IO state machine
226  */
227 static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
228 					enum bfa_ioim_event event);
229 static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
230 					enum bfa_ioim_event event);
231 static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
232 					enum bfa_ioim_event event);
233 static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
234 					enum bfa_ioim_event event);
235 static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
236 					enum bfa_ioim_event event);
237 static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
238 					enum bfa_ioim_event event);
239 static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
240 					enum bfa_ioim_event event);
241 static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
242 					enum bfa_ioim_event event);
243 static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
244 					enum bfa_ioim_event event);
245 static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
246 					enum bfa_ioim_event event);
247 static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
248 					enum bfa_ioim_event event);
249 static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
250 					enum bfa_ioim_event event);
251 /*
252  * forward declaration for BFA TSKIM functions
253  */
254 static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
255 static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
256 static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
257 					struct scsi_lun lun);
258 static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
259 static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
260 static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
261 static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
262 static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
263 static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
264 
265 /*
266  * forward declaration of BFA TSKIM state machine
267  */
268 static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
269 					enum bfa_tskim_event event);
270 static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
271 					enum bfa_tskim_event event);
272 static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
273 					enum bfa_tskim_event event);
274 static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
275 					enum bfa_tskim_event event);
276 static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
277 					enum bfa_tskim_event event);
278 static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
279 					enum bfa_tskim_event event);
280 static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
281 					enum bfa_tskim_event event);
282 /*
283  *  BFA FCP Initiator Mode module
284  */
285 
286 /*
287  * Compute and return memory needed by FCP(im) module.
288  */
289 static void
290 bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
291 		u32 *dm_len)
292 {
293 	bfa_itnim_meminfo(cfg, km_len, dm_len);
294 
295 	/*
296 	 * IO memory
297 	 */
298 	if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
299 		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
300 	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
301 		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
302 
303 	*km_len += cfg->fwcfg.num_ioim_reqs *
304 	  (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
305 
306 	*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
307 
308 	/*
309 	 * task management command memory
310 	 */
311 	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
312 		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
313 	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
314 }
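/*
 * Illustrative sizing (not part of the driver): with num_ioim_reqs clamped
 * to [BFA_IOIM_MIN, BFA_IOIM_MAX], each IO request costs
 * sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s) bytes of
 * kernel-va memory plus BFI_IOIM_SNSLEN bytes of DMA-able memory,
 * presumably for SCSI sense data.
 */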
315 
316 
317 static void
318 bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
319 		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
320 {
321 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
322 
323 	bfa_trc(bfa, cfg->drvcfg.path_tov);
324 	bfa_trc(bfa, cfg->fwcfg.num_rports);
325 	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
326 	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
327 
328 	fcpim->bfa		= bfa;
329 	fcpim->num_itnims	= cfg->fwcfg.num_rports;
330 	fcpim->num_ioim_reqs  = cfg->fwcfg.num_ioim_reqs;
331 	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
332 	fcpim->path_tov		= cfg->drvcfg.path_tov;
333 	fcpim->delay_comp	= cfg->drvcfg.delay_comp;
334 	fcpim->profile_comp = NULL;
335 	fcpim->profile_start = NULL;
336 
337 	bfa_itnim_attach(fcpim, meminfo);
338 	bfa_tskim_attach(fcpim, meminfo);
339 	bfa_ioim_attach(fcpim, meminfo);
340 }
341 
342 static void
343 bfa_fcpim_detach(struct bfa_s *bfa)
344 {
345 }
346 
347 static void
348 bfa_fcpim_start(struct bfa_s *bfa)
349 {
350 }
351 
352 static void
353 bfa_fcpim_stop(struct bfa_s *bfa)
354 {
355 }
356 
357 static void
358 bfa_fcpim_iocdisable(struct bfa_s *bfa)
359 {
360 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
361 	struct bfa_itnim_s *itnim;
362 	struct list_head *qe, *qen;
363 
364 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
365 		itnim = (struct bfa_itnim_s *) qe;
366 		bfa_itnim_iocdisable(itnim);
367 	}
368 }
369 
370 void
371 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
372 {
373 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
374 
375 	fcpim->path_tov = path_tov * 1000;
376 	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
377 		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
378 }
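/*
 * Note: path_tov is supplied in seconds and stored in milliseconds, capped
 * at BFA_FCPIM_PATHTOV_MAX. For example, a hypothetical caller doing
 *
 *	bfa_fcpim_path_tov_set(bfa, 30);
 *
 * stores 30000 ms (assuming that is within the cap), and
 * bfa_fcpim_path_tov_get(bfa) then returns 30.
 */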
379 
380 u16
381 bfa_fcpim_path_tov_get(struct bfa_s *bfa)
382 {
383 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
384 
385 	return fcpim->path_tov / 1000;
386 }
387 
388 u16
389 bfa_fcpim_qdepth_get(struct bfa_s *bfa)
390 {
391 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
392 
393 	return fcpim->q_depth;
394 }
395 
396 /*
397  *  BFA ITNIM module state machine functions
398  */
399 
400 /*
401  * Beginning/unallocated state - no events expected.
402  */
403 static void
404 bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
405 {
406 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
407 	bfa_trc(itnim->bfa, event);
408 
409 	switch (event) {
410 	case BFA_ITNIM_SM_CREATE:
411 		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
412 		itnim->is_online = BFA_FALSE;
413 		bfa_fcpim_additn(itnim);
414 		break;
415 
416 	default:
417 		bfa_sm_fault(itnim->bfa, event);
418 	}
419 }
420 
421 /*
422  * Beginning state, only online event expected.
423  */
424 static void
425 bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
426 {
427 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
428 	bfa_trc(itnim->bfa, event);
429 
430 	switch (event) {
431 	case BFA_ITNIM_SM_ONLINE:
432 		if (bfa_itnim_send_fwcreate(itnim))
433 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
434 		else
435 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
436 		break;
437 
438 	case BFA_ITNIM_SM_DELETE:
439 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
440 		bfa_fcpim_delitn(itnim);
441 		break;
442 
443 	case BFA_ITNIM_SM_HWFAIL:
444 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
445 		break;
446 
447 	default:
448 		bfa_sm_fault(itnim->bfa, event);
449 	}
450 }
451 
452 /*
453  *	Waiting for itnim create response from firmware.
454  */
455 static void
456 bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
457 {
458 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
459 	bfa_trc(itnim->bfa, event);
460 
461 	switch (event) {
462 	case BFA_ITNIM_SM_FWRSP:
463 		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
464 		itnim->is_online = BFA_TRUE;
465 		bfa_itnim_iotov_online(itnim);
466 		bfa_itnim_online_cb(itnim);
467 		break;
468 
469 	case BFA_ITNIM_SM_DELETE:
470 		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
471 		break;
472 
473 	case BFA_ITNIM_SM_OFFLINE:
474 		if (bfa_itnim_send_fwdelete(itnim))
475 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
476 		else
477 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
478 		break;
479 
480 	case BFA_ITNIM_SM_HWFAIL:
481 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
482 		break;
483 
484 	default:
485 		bfa_sm_fault(itnim->bfa, event);
486 	}
487 }
488 
489 static void
490 bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
491 			enum bfa_itnim_event event)
492 {
493 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
494 	bfa_trc(itnim->bfa, event);
495 
496 	switch (event) {
497 	case BFA_ITNIM_SM_QRESUME:
498 		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
499 		bfa_itnim_send_fwcreate(itnim);
500 		break;
501 
502 	case BFA_ITNIM_SM_DELETE:
503 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
504 		bfa_reqq_wcancel(&itnim->reqq_wait);
505 		bfa_fcpim_delitn(itnim);
506 		break;
507 
508 	case BFA_ITNIM_SM_OFFLINE:
509 		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
510 		bfa_reqq_wcancel(&itnim->reqq_wait);
511 		bfa_itnim_offline_cb(itnim);
512 		break;
513 
514 	case BFA_ITNIM_SM_HWFAIL:
515 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
516 		bfa_reqq_wcancel(&itnim->reqq_wait);
517 		break;
518 
519 	default:
520 		bfa_sm_fault(itnim->bfa, event);
521 	}
522 }
523 
524 /*
525  * Waiting for itnim create response from firmware, a delete is pending.
526  */
527 static void
528 bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
529 				enum bfa_itnim_event event)
530 {
531 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
532 	bfa_trc(itnim->bfa, event);
533 
534 	switch (event) {
535 	case BFA_ITNIM_SM_FWRSP:
536 		if (bfa_itnim_send_fwdelete(itnim))
537 			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
538 		else
539 			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
540 		break;
541 
542 	case BFA_ITNIM_SM_HWFAIL:
543 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
544 		bfa_fcpim_delitn(itnim);
545 		break;
546 
547 	default:
548 		bfa_sm_fault(itnim->bfa, event);
549 	}
550 }
551 
552 /*
553  * Online state - normal parking state.
554  */
555 static void
556 bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
557 {
558 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
559 	bfa_trc(itnim->bfa, event);
560 
561 	switch (event) {
562 	case BFA_ITNIM_SM_OFFLINE:
563 		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
564 		itnim->is_online = BFA_FALSE;
565 		bfa_itnim_iotov_start(itnim);
566 		bfa_itnim_cleanup(itnim);
567 		break;
568 
569 	case BFA_ITNIM_SM_DELETE:
570 		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
571 		itnim->is_online = BFA_FALSE;
572 		bfa_itnim_cleanup(itnim);
573 		break;
574 
575 	case BFA_ITNIM_SM_SLER:
576 		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
577 		itnim->is_online = BFA_FALSE;
578 		bfa_itnim_iotov_start(itnim);
579 		bfa_itnim_sler_cb(itnim);
580 		break;
581 
582 	case BFA_ITNIM_SM_HWFAIL:
583 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
584 		itnim->is_online = BFA_FALSE;
585 		bfa_itnim_iotov_start(itnim);
586 		bfa_itnim_iocdisable_cleanup(itnim);
587 		break;
588 
589 	default:
590 		bfa_sm_fault(itnim->bfa, event);
591 	}
592 }
593 
594 /*
595  * Second level error recovery is needed.
596  */
597 static void
598 bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
599 {
600 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
601 	bfa_trc(itnim->bfa, event);
602 
603 	switch (event) {
604 	case BFA_ITNIM_SM_OFFLINE:
605 		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
606 		bfa_itnim_cleanup(itnim);
607 		break;
608 
609 	case BFA_ITNIM_SM_DELETE:
610 		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
611 		bfa_itnim_cleanup(itnim);
612 		bfa_itnim_iotov_delete(itnim);
613 		break;
614 
615 	case BFA_ITNIM_SM_HWFAIL:
616 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
617 		bfa_itnim_iocdisable_cleanup(itnim);
618 		break;
619 
620 	default:
621 		bfa_sm_fault(itnim->bfa, event);
622 	}
623 }
624 
625 /*
626  * Going offline. Waiting for active IO cleanup.
627  */
628 static void
629 bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
630 				 enum bfa_itnim_event event)
631 {
632 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
633 	bfa_trc(itnim->bfa, event);
634 
635 	switch (event) {
636 	case BFA_ITNIM_SM_CLEANUP:
637 		if (bfa_itnim_send_fwdelete(itnim))
638 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
639 		else
640 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
641 		break;
642 
643 	case BFA_ITNIM_SM_DELETE:
644 		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
645 		bfa_itnim_iotov_delete(itnim);
646 		break;
647 
648 	case BFA_ITNIM_SM_HWFAIL:
649 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
650 		bfa_itnim_iocdisable_cleanup(itnim);
651 		bfa_itnim_offline_cb(itnim);
652 		break;
653 
654 	case BFA_ITNIM_SM_SLER:
655 		break;
656 
657 	default:
658 		bfa_sm_fault(itnim->bfa, event);
659 	}
660 }
661 
662 /*
663  * Deleting itnim. Waiting for active IO cleanup.
664  */
665 static void
666 bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
667 				enum bfa_itnim_event event)
668 {
669 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
670 	bfa_trc(itnim->bfa, event);
671 
672 	switch (event) {
673 	case BFA_ITNIM_SM_CLEANUP:
674 		if (bfa_itnim_send_fwdelete(itnim))
675 			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
676 		else
677 			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
678 		break;
679 
680 	case BFA_ITNIM_SM_HWFAIL:
681 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
682 		bfa_itnim_iocdisable_cleanup(itnim);
683 		break;
684 
685 	default:
686 		bfa_sm_fault(itnim->bfa, event);
687 	}
688 }
689 
690 /*
691  * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
692  */
693 static void
694 bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
695 {
696 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
697 	bfa_trc(itnim->bfa, event);
698 
699 	switch (event) {
700 	case BFA_ITNIM_SM_FWRSP:
701 		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
702 		bfa_itnim_offline_cb(itnim);
703 		break;
704 
705 	case BFA_ITNIM_SM_DELETE:
706 		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
707 		break;
708 
709 	case BFA_ITNIM_SM_HWFAIL:
710 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
711 		bfa_itnim_offline_cb(itnim);
712 		break;
713 
714 	default:
715 		bfa_sm_fault(itnim->bfa, event);
716 	}
717 }
718 
719 static void
720 bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
721 			enum bfa_itnim_event event)
722 {
723 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
724 	bfa_trc(itnim->bfa, event);
725 
726 	switch (event) {
727 	case BFA_ITNIM_SM_QRESUME:
728 		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
729 		bfa_itnim_send_fwdelete(itnim);
730 		break;
731 
732 	case BFA_ITNIM_SM_DELETE:
733 		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
734 		break;
735 
736 	case BFA_ITNIM_SM_HWFAIL:
737 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
738 		bfa_reqq_wcancel(&itnim->reqq_wait);
739 		bfa_itnim_offline_cb(itnim);
740 		break;
741 
742 	default:
743 		bfa_sm_fault(itnim->bfa, event);
744 	}
745 }
746 
747 /*
748  * Offline state.
749  */
750 static void
751 bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
752 {
753 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
754 	bfa_trc(itnim->bfa, event);
755 
756 	switch (event) {
757 	case BFA_ITNIM_SM_DELETE:
758 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
759 		bfa_itnim_iotov_delete(itnim);
760 		bfa_fcpim_delitn(itnim);
761 		break;
762 
763 	case BFA_ITNIM_SM_ONLINE:
764 		if (bfa_itnim_send_fwcreate(itnim))
765 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
766 		else
767 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
768 		break;
769 
770 	case BFA_ITNIM_SM_HWFAIL:
771 		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
772 		break;
773 
774 	default:
775 		bfa_sm_fault(itnim->bfa, event);
776 	}
777 }
778 
779 static void
780 bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
781 				enum bfa_itnim_event event)
782 {
783 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
784 	bfa_trc(itnim->bfa, event);
785 
786 	switch (event) {
787 	case BFA_ITNIM_SM_DELETE:
788 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
789 		bfa_itnim_iotov_delete(itnim);
790 		bfa_fcpim_delitn(itnim);
791 		break;
792 
793 	case BFA_ITNIM_SM_OFFLINE:
794 		bfa_itnim_offline_cb(itnim);
795 		break;
796 
797 	case BFA_ITNIM_SM_ONLINE:
798 		if (bfa_itnim_send_fwcreate(itnim))
799 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
800 		else
801 			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
802 		break;
803 
804 	case BFA_ITNIM_SM_HWFAIL:
805 		break;
806 
807 	default:
808 		bfa_sm_fault(itnim->bfa, event);
809 	}
810 }
811 
812 /*
813  * Itnim is deleted, waiting for firmware response to delete.
814  */
815 static void
816 bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
817 {
818 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
819 	bfa_trc(itnim->bfa, event);
820 
821 	switch (event) {
822 	case BFA_ITNIM_SM_FWRSP:
823 	case BFA_ITNIM_SM_HWFAIL:
824 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
825 		bfa_fcpim_delitn(itnim);
826 		break;
827 
828 	default:
829 		bfa_sm_fault(itnim->bfa, event);
830 	}
831 }
832 
833 static void
834 bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
835 		enum bfa_itnim_event event)
836 {
837 	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
838 	bfa_trc(itnim->bfa, event);
839 
840 	switch (event) {
841 	case BFA_ITNIM_SM_QRESUME:
842 		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
843 		bfa_itnim_send_fwdelete(itnim);
844 		break;
845 
846 	case BFA_ITNIM_SM_HWFAIL:
847 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
848 		bfa_reqq_wcancel(&itnim->reqq_wait);
849 		bfa_fcpim_delitn(itnim);
850 		break;
851 
852 	default:
853 		bfa_sm_fault(itnim->bfa, event);
854 	}
855 }
856 
857 /*
858  * Initiate cleanup of all IOs on an IOC failure.
859  */
860 static void
861 bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
862 {
863 	struct bfa_tskim_s *tskim;
864 	struct bfa_ioim_s *ioim;
865 	struct list_head	*qe, *qen;
866 
867 	list_for_each_safe(qe, qen, &itnim->tsk_q) {
868 		tskim = (struct bfa_tskim_s *) qe;
869 		bfa_tskim_iocdisable(tskim);
870 	}
871 
872 	list_for_each_safe(qe, qen, &itnim->io_q) {
873 		ioim = (struct bfa_ioim_s *) qe;
874 		bfa_ioim_iocdisable(ioim);
875 	}
876 
877 	/*
878 	 * For IO requests in the pending queue, we pretend an early timeout.
879 	 */
880 	list_for_each_safe(qe, qen, &itnim->pending_q) {
881 		ioim = (struct bfa_ioim_s *) qe;
882 		bfa_ioim_tov(ioim);
883 	}
884 
885 	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
886 		ioim = (struct bfa_ioim_s *) qe;
887 		bfa_ioim_iocdisable(ioim);
888 	}
889 }
890 
891 /*
892  * IO cleanup completion
893  */
894 static void
895 bfa_itnim_cleanp_comp(void *itnim_cbarg)
896 {
897 	struct bfa_itnim_s *itnim = itnim_cbarg;
898 
899 	bfa_stats(itnim, cleanup_comps);
900 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
901 }
902 
903 /*
904  * Initiate cleanup of all IOs.
905  */
906 static void
907 bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
908 {
909 	struct bfa_ioim_s  *ioim;
910 	struct bfa_tskim_s *tskim;
911 	struct list_head	*qe, *qen;
912 
913 	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
914 
915 	list_for_each_safe(qe, qen, &itnim->io_q) {
916 		ioim = (struct bfa_ioim_s *) qe;
917 
918 		/*
919 		 * Move the IO from the active queue to a cleanup queue so that
920 		 * a later TM will not pick it up.
921 		 */
922 		list_del(&ioim->qe);
923 		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
924 
925 		bfa_wc_up(&itnim->wc);
926 		bfa_ioim_cleanup(ioim);
927 	}
928 
929 	list_for_each_safe(qe, qen, &itnim->tsk_q) {
930 		tskim = (struct bfa_tskim_s *) qe;
931 		bfa_wc_up(&itnim->wc);
932 		bfa_tskim_cleanup(tskim);
933 	}
934 
935 	bfa_wc_wait(&itnim->wc);
936 }
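/*
 * The cleanup above uses the bfa_wc_* waiting-counter pattern: bfa_wc_up()
 * takes one count per outstanding IO/TM, bfa_itnim_iodone() and
 * bfa_itnim_tskdone() drop one each via bfa_wc_down(), and bfa_wc_wait()
 * presumably releases the count taken by bfa_wc_init() so that
 * bfa_itnim_cleanp_comp() fires exactly once when all of them are done.
 */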
937 
938 static void
939 __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
940 {
941 	struct bfa_itnim_s *itnim = cbarg;
942 
943 	if (complete)
944 		bfa_cb_itnim_online(itnim->ditn);
945 }
946 
947 static void
948 __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
949 {
950 	struct bfa_itnim_s *itnim = cbarg;
951 
952 	if (complete)
953 		bfa_cb_itnim_offline(itnim->ditn);
954 }
955 
956 static void
957 __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
958 {
959 	struct bfa_itnim_s *itnim = cbarg;
960 
961 	if (complete)
962 		bfa_cb_itnim_sler(itnim->ditn);
963 }
964 
965 /*
966  * Called to resume any I/O requests waiting for room in the request queue.
967  */
968 static void
969 bfa_itnim_qresume(void *cbarg)
970 {
971 	struct bfa_itnim_s *itnim = cbarg;
972 
973 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
974 }
975 
976 /*
977  *  bfa_itnim_public
978  */
979 
980 void
981 bfa_itnim_iodone(struct bfa_itnim_s *itnim)
982 {
983 	bfa_wc_down(&itnim->wc);
984 }
985 
986 void
987 bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
988 {
989 	bfa_wc_down(&itnim->wc);
990 }
991 
992 void
993 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
994 		u32 *dm_len)
995 {
996 	/*
997 	 * ITN memory
998 	 */
999 	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
1000 }
1001 
1002 void
1003 bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1004 {
1005 	struct bfa_s	*bfa = fcpim->bfa;
1006 	struct bfa_itnim_s *itnim;
1007 	int	i, j;
1008 
1009 	INIT_LIST_HEAD(&fcpim->itnim_q);
1010 
1011 	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
1012 	fcpim->itnim_arr = itnim;
1013 
1014 	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
1015 		memset(itnim, 0, sizeof(struct bfa_itnim_s));
1016 		itnim->bfa = bfa;
1017 		itnim->fcpim = fcpim;
1018 		itnim->reqq = BFA_REQQ_QOS_LO;
1019 		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
1020 		itnim->iotov_active = BFA_FALSE;
1021 		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
1022 
1023 		INIT_LIST_HEAD(&itnim->io_q);
1024 		INIT_LIST_HEAD(&itnim->io_cleanup_q);
1025 		INIT_LIST_HEAD(&itnim->pending_q);
1026 		INIT_LIST_HEAD(&itnim->tsk_q);
1027 		INIT_LIST_HEAD(&itnim->delay_comp_q);
1028 		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1029 			itnim->ioprofile.io_latency.min[j] = ~0;
1030 		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1031 	}
1032 
1033 	bfa_meminfo_kva(minfo) = (u8 *) itnim;
1034 }
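/*
 * bfa_itnim_attach() carves the itnim array out of the kva region in
 * minfo and advances the kva cursor past it; rport tag i maps 1:1 onto
 * itnim_arr[i] via BFA_RPORT_FROM_TAG() and BFA_ITNIM_FROM_TAG().
 */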
1035 
1036 void
1037 bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
1038 {
1039 	bfa_stats(itnim, ioc_disabled);
1040 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
1041 }
1042 
1043 static bfa_boolean_t
1044 bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1045 {
1046 	struct bfi_itnim_create_req_s *m;
1047 
1048 	itnim->msg_no++;
1049 
1050 	/*
1051 	 * check for room in queue to send request now
1052 	 */
1053 	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1054 	if (!m) {
1055 		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1056 		return BFA_FALSE;
1057 	}
1058 
1059 	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
1060 			bfa_lpuid(itnim->bfa));
1061 	m->fw_handle = itnim->rport->fw_handle;
1062 	m->class = FC_CLASS_3;
1063 	m->seq_rec = itnim->seq_rec;
1064 	m->msg_no = itnim->msg_no;
1065 	bfa_stats(itnim, fw_create);
1066 
1067 	/*
1068 	 * queue I/O message to firmware
1069 	 */
1070 	bfa_reqq_produce(itnim->bfa, itnim->reqq);
1071 	return BFA_TRUE;
1072 }
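/*
 * Request-queue flow control, as used above: when bfa_reqq_next() reports
 * no CQ space, the itnim parks on its reqq wait-queue entry and returns
 * BFA_FALSE so the state machine moves to a *_qfull state; the queued
 * bfa_itnim_qresume() callback later raises BFA_ITNIM_SM_QRESUME and the
 * send is retried.
 */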
1073 
1074 static bfa_boolean_t
1075 bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1076 {
1077 	struct bfi_itnim_delete_req_s *m;
1078 
1079 	/*
1080 	 * check for room in queue to send request now
1081 	 */
1082 	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1083 	if (!m) {
1084 		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1085 		return BFA_FALSE;
1086 	}
1087 
1088 	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
1089 			bfa_lpuid(itnim->bfa));
1090 	m->fw_handle = itnim->rport->fw_handle;
1091 	bfa_stats(itnim, fw_delete);
1092 
1093 	/*
1094 	 * queue I/O message to firmware
1095 	 */
1096 	bfa_reqq_produce(itnim->bfa, itnim->reqq);
1097 	return BFA_TRUE;
1098 }
1099 
1100 /*
1101  * Clean up all pending failed inflight requests.
1102  */
1103 static void
1104 bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1105 {
1106 	struct bfa_ioim_s *ioim;
1107 	struct list_head *qe, *qen;
1108 
1109 	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1110 		ioim = (struct bfa_ioim_s *)qe;
1111 		bfa_ioim_delayed_comp(ioim, iotov);
1112 	}
1113 }
1114 
1115 /*
1116  * Start all pending IO requests.
1117  */
1118 static void
1119 bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1120 {
1121 	struct bfa_ioim_s *ioim;
1122 
1123 	bfa_itnim_iotov_stop(itnim);
1124 
1125 	/*
1126 	 * Abort all inflight IO requests in the queue
1127 	 */
1128 	bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1129 
1130 	/*
1131 	 * Start all pending IO requests.
1132 	 */
1133 	while (!list_empty(&itnim->pending_q)) {
1134 		bfa_q_deq(&itnim->pending_q, &ioim);
1135 		list_add_tail(&ioim->qe, &itnim->io_q);
1136 		bfa_ioim_start(ioim);
1137 	}
1138 }
1139 
1140 /*
1141  * Fail all pending IO requests
1142  */
1143 static void
1144 bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1145 {
1146 	struct bfa_ioim_s *ioim;
1147 
1148 	/*
1149 	 * Fail all inflight IO requests in the queue
1150 	 */
1151 	bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1152 
1153 	/*
1154 	 * Fail any pending IO requests.
1155 	 */
1156 	while (!list_empty(&itnim->pending_q)) {
1157 		bfa_q_deq(&itnim->pending_q, &ioim);
1158 		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1159 		bfa_ioim_tov(ioim);
1160 	}
1161 }
1162 
1163 /*
1164  * IO TOV timer callback. Fail any pending IO requests.
1165  */
1166 static void
1167 bfa_itnim_iotov(void *itnim_arg)
1168 {
1169 	struct bfa_itnim_s *itnim = itnim_arg;
1170 
1171 	itnim->iotov_active = BFA_FALSE;
1172 
1173 	bfa_cb_itnim_tov_begin(itnim->ditn);
1174 	bfa_itnim_iotov_cleanup(itnim);
1175 	bfa_cb_itnim_tov(itnim->ditn);
1176 }
1177 
1178 /*
1179  * Start IO TOV timer for failing back pending IO requests in offline state.
1180  */
1181 static void
1182 bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1183 {
1184 	if (itnim->fcpim->path_tov > 0) {
1185 
1186 		itnim->iotov_active = BFA_TRUE;
1187 		WARN_ON(!bfa_itnim_hold_io(itnim));
1188 		bfa_timer_start(itnim->bfa, &itnim->timer,
1189 			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
1190 	}
1191 }
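/*
 * While this timer (path_tov milliseconds) is armed, bfa_itnim_hold_io()
 * returns true for the offline-transition states, so new IO requests are
 * parked on pending_q instead of failing immediately; hence the WARN_ON
 * above.
 */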
1192 
1193 /*
1194  * Stop IO TOV timer.
1195  */
1196 static void
1197 bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1198 {
1199 	if (itnim->iotov_active) {
1200 		itnim->iotov_active = BFA_FALSE;
1201 		bfa_timer_stop(&itnim->timer);
1202 	}
1203 }
1204 
1205 /*
1206  * Stop IO TOV timer and fail back any pending IO requests.
1207  */
1208 static void
1209 bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1210 {
1211 	bfa_boolean_t pathtov_active = BFA_FALSE;
1212 
1213 	if (itnim->iotov_active)
1214 		pathtov_active = BFA_TRUE;
1215 
1216 	bfa_itnim_iotov_stop(itnim);
1217 	if (pathtov_active)
1218 		bfa_cb_itnim_tov_begin(itnim->ditn);
1219 	bfa_itnim_iotov_cleanup(itnim);
1220 	if (pathtov_active)
1221 		bfa_cb_itnim_tov(itnim->ditn);
1222 }
1223 
1224 static void
1225 bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1226 {
1227 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1228 	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1229 		itnim->stats.iocomp_aborted;
1230 	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
1231 		itnim->stats.iocomp_timedout;
1232 	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
1233 		itnim->stats.iocom_sqer_needed;
1234 	fcpim->del_itn_stats.del_itn_iocom_res_free +=
1235 		itnim->stats.iocom_res_free;
1236 	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
1237 		itnim->stats.iocom_hostabrts;
1238 	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
1239 	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
1240 	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1241 }
1242 
1243 /*
1244  * bfa_itnim_public
1245  */
1246 
1247 /*
1248  * Itnim interrupt processing.
1249  */
1250 void
1251 bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1252 {
1253 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1254 	union bfi_itnim_i2h_msg_u msg;
1255 	struct bfa_itnim_s *itnim;
1256 
1257 	bfa_trc(bfa, m->mhdr.msg_id);
1258 
1259 	msg.msg = m;
1260 
1261 	switch (m->mhdr.msg_id) {
1262 	case BFI_ITNIM_I2H_CREATE_RSP:
1263 		itnim = BFA_ITNIM_FROM_TAG(fcpim,
1264 						msg.create_rsp->bfa_handle);
1265 		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
1266 		bfa_stats(itnim, create_comps);
1267 		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1268 		break;
1269 
1270 	case BFI_ITNIM_I2H_DELETE_RSP:
1271 		itnim = BFA_ITNIM_FROM_TAG(fcpim,
1272 						msg.delete_rsp->bfa_handle);
1273 		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
1274 		bfa_stats(itnim, delete_comps);
1275 		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1276 		break;
1277 
1278 	case BFI_ITNIM_I2H_SLER_EVENT:
1279 		itnim = BFA_ITNIM_FROM_TAG(fcpim,
1280 						msg.sler_event->bfa_handle);
1281 		bfa_stats(itnim, sler_events);
1282 		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
1283 		break;
1284 
1285 	default:
1286 		bfa_trc(bfa, m->mhdr.msg_id);
1287 		WARN_ON(1);
1288 	}
1289 }
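/*
 * The ISR demultiplexes firmware responses purely by bfa_handle (given
 * BFA_ITNIM_FROM_TAG(), presumably the rport tag echoed back by firmware)
 * and converts them into FWRSP/SLER state machine events.
 */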
1290 
1291 /*
1292  * bfa_itnim_api
1293  */
1294 
1295 struct bfa_itnim_s *
1296 bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1297 {
1298 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1299 	struct bfa_itnim_s *itnim;
1300 
1301 	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1302 	WARN_ON(itnim->rport != rport);
1303 
1304 	itnim->ditn = ditn;
1305 
1306 	bfa_stats(itnim, creates);
1307 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1308 
1309 	return itnim;
1310 }
1311 
1312 void
1313 bfa_itnim_delete(struct bfa_itnim_s *itnim)
1314 {
1315 	bfa_stats(itnim, deletes);
1316 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
1317 }
1318 
1319 void
1320 bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
1321 {
1322 	itnim->seq_rec = seq_rec;
1323 	bfa_stats(itnim, onlines);
1324 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
1325 }
1326 
1327 void
1328 bfa_itnim_offline(struct bfa_itnim_s *itnim)
1329 {
1330 	bfa_stats(itnim, offlines);
1331 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1332 }
1333 
1334 /*
1335  * Return true if the itnim is considered offline and IO requests should
1336  * be held. IO is not held if the itnim is being deleted.
1337  */
1338 bfa_boolean_t
1339 bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1340 {
1341 	return itnim->fcpim->path_tov && itnim->iotov_active &&
1342 		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1343 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1344 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1345 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1346 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1347 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1348 }
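/*
 * In short: IOs are held only while a path TOV timer is active and the
 * itnim is in a transitional or offline state. The deleting states are
 * deliberately absent from the list above, so IOs fail fast on delete.
 */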
1349 
1350 void
1351 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1352 {
1353 	int j;
1354 	memset(&itnim->stats, 0, sizeof(itnim->stats));
1355 	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1356 	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1357 		itnim->ioprofile.io_latency.min[j] = ~0;
1358 }
1359 
1360 /*
1361  *  BFA IO module state machine functions
1362  */
1363 
1364 /*
1365  * IO is not started (unallocated).
1366  */
1367 static void
1368 bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1369 {
1370 	switch (event) {
1371 	case BFA_IOIM_SM_START:
1372 		if (!bfa_itnim_is_online(ioim->itnim)) {
1373 			if (!bfa_itnim_hold_io(ioim->itnim)) {
1374 				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1375 				list_del(&ioim->qe);
1376 				list_add_tail(&ioim->qe,
1377 					&ioim->fcpim->ioim_comp_q);
1378 				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1379 						__bfa_cb_ioim_pathtov, ioim);
1380 			} else {
1381 				list_del(&ioim->qe);
1382 				list_add_tail(&ioim->qe,
1383 					&ioim->itnim->pending_q);
1384 			}
1385 			break;
1386 		}
1387 
1388 		if (ioim->nsges > BFI_SGE_INLINE) {
1389 			if (!bfa_ioim_sgpg_alloc(ioim)) {
1390 				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
1391 				return;
1392 			}
1393 		}
1394 
1395 		if (!bfa_ioim_send_ioreq(ioim)) {
1396 			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1397 			break;
1398 		}
1399 
1400 		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1401 		break;
1402 
1403 	case BFA_IOIM_SM_IOTOV:
1404 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1405 		bfa_ioim_move_to_comp_q(ioim);
1406 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1407 				__bfa_cb_ioim_pathtov, ioim);
1408 		break;
1409 
1410 	case BFA_IOIM_SM_ABORT:
1411 		/*
1412 		 * An IO in the pending queue can get abort requests. Complete
1413 		 * them immediately.
1414 		 */
1415 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1416 		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1417 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1418 				__bfa_cb_ioim_abort, ioim);
1419 		break;
1420 
1421 	default:
1422 		bfa_sm_fault(ioim->bfa, event);
1423 	}
1424 }
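/*
 * Summary of the IO start dispositions above: if the itnim is offline and
 * IO is not being held, the IO completes immediately with a path TOV
 * error; if it is held, the IO parks on pending_q. Online IOs with more
 * than BFI_SGE_INLINE scatter-gather entries first wait for SG pages,
 * then the request is sent (or queued in the qfull state if the CQ is
 * full).
 */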
1425 
1426 /*
1427  * IO is waiting for SG pages.
1428  */
1429 static void
1430 bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1431 {
1432 	bfa_trc(ioim->bfa, ioim->iotag);
1433 	bfa_trc(ioim->bfa, event);
1434 
1435 	switch (event) {
1436 	case BFA_IOIM_SM_SGALLOCED:
1437 		if (!bfa_ioim_send_ioreq(ioim)) {
1438 			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1439 			break;
1440 		}
1441 		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1442 		break;
1443 
1444 	case BFA_IOIM_SM_CLEANUP:
1445 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1446 		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1447 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1448 			      ioim);
1449 		bfa_ioim_notify_cleanup(ioim);
1450 		break;
1451 
1452 	case BFA_IOIM_SM_ABORT:
1453 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1454 		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1455 		bfa_ioim_move_to_comp_q(ioim);
1456 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1457 			      ioim);
1458 		break;
1459 
1460 	case BFA_IOIM_SM_HWFAIL:
1461 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1462 		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1463 		bfa_ioim_move_to_comp_q(ioim);
1464 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1465 			      ioim);
1466 		break;
1467 
1468 	default:
1469 		bfa_sm_fault(ioim->bfa, event);
1470 	}
1471 }
1472 
1473 /*
1474  * IO is active.
1475  */
1476 static void
1477 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1478 {
1479 	switch (event) {
1480 	case BFA_IOIM_SM_COMP_GOOD:
1481 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1482 		bfa_ioim_move_to_comp_q(ioim);
1483 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1484 			      __bfa_cb_ioim_good_comp, ioim);
1485 		break;
1486 
1487 	case BFA_IOIM_SM_COMP:
1488 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1489 		bfa_ioim_move_to_comp_q(ioim);
1490 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1491 			      ioim);
1492 		break;
1493 
1494 	case BFA_IOIM_SM_DONE:
1495 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1496 		bfa_ioim_move_to_comp_q(ioim);
1497 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1498 			      ioim);
1499 		break;
1500 
1501 	case BFA_IOIM_SM_ABORT:
1502 		ioim->iosp->abort_explicit = BFA_TRUE;
1503 		ioim->io_cbfn = __bfa_cb_ioim_abort;
1504 
1505 		if (bfa_ioim_send_abort(ioim))
1506 			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1507 		else {
1508 			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1509 			bfa_stats(ioim->itnim, qwait);
1510 			bfa_reqq_wait(ioim->bfa, ioim->reqq,
1511 					  &ioim->iosp->reqq_wait);
1512 		}
1513 		break;
1514 
1515 	case BFA_IOIM_SM_CLEANUP:
1516 		ioim->iosp->abort_explicit = BFA_FALSE;
1517 		ioim->io_cbfn = __bfa_cb_ioim_failed;
1518 
1519 		if (bfa_ioim_send_abort(ioim))
1520 			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1521 		else {
1522 			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1523 			bfa_stats(ioim->itnim, qwait);
1524 			bfa_reqq_wait(ioim->bfa, ioim->reqq,
1525 					  &ioim->iosp->reqq_wait);
1526 		}
1527 		break;
1528 
1529 	case BFA_IOIM_SM_HWFAIL:
1530 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1531 		bfa_ioim_move_to_comp_q(ioim);
1532 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1533 			      ioim);
1534 		break;
1535 
1536 	case BFA_IOIM_SM_SQRETRY:
1537 		if (bfa_ioim_maxretry_reached(ioim)) {
1538 			/* max retry reached, free IO */
1539 			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1540 			bfa_ioim_move_to_comp_q(ioim);
1541 			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1542 					__bfa_cb_ioim_failed, ioim);
1543 			break;
1544 		}
1545 		/* waiting for IO tag resource free */
1546 		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1547 		break;
1548 
1549 	default:
1550 		bfa_sm_fault(ioim->bfa, event);
1551 	}
1552 }
1553 
1554 /*
1555  * IO is retried with a new tag.
1556  */
1557 static void
1558 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1559 {
1560 	switch (event) {
1561 	case BFA_IOIM_SM_FREE:
1562 		/* abts and rrq done. Now retry the IO with new tag */
1563 		bfa_ioim_update_iotag(ioim);
1564 		if (!bfa_ioim_send_ioreq(ioim)) {
1565 			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1566 			break;
1567 		}
1568 		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1569 		break;
1570 
1571 	case BFA_IOIM_SM_CLEANUP:
1572 		ioim->iosp->abort_explicit = BFA_FALSE;
1573 		ioim->io_cbfn = __bfa_cb_ioim_failed;
1574 
1575 		if (bfa_ioim_send_abort(ioim))
1576 			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1577 		else {
1578 			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1579 			bfa_stats(ioim->itnim, qwait);
1580 			bfa_reqq_wait(ioim->bfa, ioim->reqq,
1581 					  &ioim->iosp->reqq_wait);
1582 		}
1583 		break;
1584 
1585 	case BFA_IOIM_SM_HWFAIL:
1586 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1587 		bfa_ioim_move_to_comp_q(ioim);
1588 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1589 			 __bfa_cb_ioim_failed, ioim);
1590 		break;
1591 
1592 	case BFA_IOIM_SM_ABORT:
1593 		/* In this state the IO abort is already done.
1594 		 * Wait for the IO tag resource to be freed.
1595 		 */
1596 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1597 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1598 			      ioim);
1599 		break;
1600 
1601 	default:
1602 		bfa_sm_fault(ioim->bfa, event);
1603 	}
1604 }
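/*
 * Sequence-recovery retry path: on BFA_IOIM_SM_SQRETRY the IO waits here
 * until BFA_IOIM_SM_FREE indicates the old tag has been released, then
 * bfa_ioim_update_iotag() assigns a fresh tag and the request is resent
 * (or queued again if the CQ is full).
 */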
1605 
1606 /*
1607  * IO is being aborted, waiting for completion from firmware.
1608  */
1609 static void
1610 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1611 {
1612 	bfa_trc(ioim->bfa, ioim->iotag);
1613 	bfa_trc(ioim->bfa, event);
1614 
1615 	switch (event) {
1616 	case BFA_IOIM_SM_COMP_GOOD:
1617 	case BFA_IOIM_SM_COMP:
1618 	case BFA_IOIM_SM_DONE:
1619 	case BFA_IOIM_SM_FREE:
1620 		break;
1621 
1622 	case BFA_IOIM_SM_ABORT_DONE:
1623 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1624 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1625 			      ioim);
1626 		break;
1627 
1628 	case BFA_IOIM_SM_ABORT_COMP:
1629 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1630 		bfa_ioim_move_to_comp_q(ioim);
1631 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1632 			      ioim);
1633 		break;
1634 
1635 	case BFA_IOIM_SM_COMP_UTAG:
1636 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1637 		bfa_ioim_move_to_comp_q(ioim);
1638 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1639 			      ioim);
1640 		break;
1641 
1642 	case BFA_IOIM_SM_CLEANUP:
1643 		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1644 		ioim->iosp->abort_explicit = BFA_FALSE;
1645 
1646 		if (bfa_ioim_send_abort(ioim))
1647 			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1648 		else {
1649 			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1650 			bfa_stats(ioim->itnim, qwait);
1651 			bfa_reqq_wait(ioim->bfa, ioim->reqq,
1652 					  &ioim->iosp->reqq_wait);
1653 		}
1654 		break;
1655 
1656 	case BFA_IOIM_SM_HWFAIL:
1657 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1658 		bfa_ioim_move_to_comp_q(ioim);
1659 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1660 			      ioim);
1661 		break;
1662 
1663 	default:
1664 		bfa_sm_fault(ioim->bfa, event);
1665 	}
1666 }
1667 
1668 /*
1669  * IO is being cleaned up (implicit abort), waiting for completion from
1670  * firmware.
1671  */
1672 static void
1673 bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1674 {
1675 	bfa_trc(ioim->bfa, ioim->iotag);
1676 	bfa_trc(ioim->bfa, event);
1677 
1678 	switch (event) {
1679 	case BFA_IOIM_SM_COMP_GOOD:
1680 	case BFA_IOIM_SM_COMP:
1681 	case BFA_IOIM_SM_DONE:
1682 	case BFA_IOIM_SM_FREE:
1683 		break;
1684 
1685 	case BFA_IOIM_SM_ABORT:
1686 		/*
1687 		 * IO is already being aborted implicitly
1688 		 */
1689 		ioim->io_cbfn = __bfa_cb_ioim_abort;
1690 		break;
1691 
1692 	case BFA_IOIM_SM_ABORT_DONE:
1693 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1694 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1695 		bfa_ioim_notify_cleanup(ioim);
1696 		break;
1697 
1698 	case BFA_IOIM_SM_ABORT_COMP:
1699 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1700 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1701 		bfa_ioim_notify_cleanup(ioim);
1702 		break;
1703 
1704 	case BFA_IOIM_SM_COMP_UTAG:
1705 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1706 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1707 		bfa_ioim_notify_cleanup(ioim);
1708 		break;
1709 
1710 	case BFA_IOIM_SM_HWFAIL:
1711 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1712 		bfa_ioim_move_to_comp_q(ioim);
1713 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1714 			      ioim);
1715 		break;
1716 
1717 	case BFA_IOIM_SM_CLEANUP:
1718 		/*
1719 		 * IO can be in cleanup state already due to TM command.
1720 		 * 2nd cleanup request comes from ITN offline event.
1721 		 */
1722 		break;
1723 
1724 	default:
1725 		bfa_sm_fault(ioim->bfa, event);
1726 	}
1727 }
1728 
1729 /*
1730  * IO is waiting for room in the request CQ.
1731  */
1732 static void
1733 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1734 {
1735 	bfa_trc(ioim->bfa, ioim->iotag);
1736 	bfa_trc(ioim->bfa, event);
1737 
1738 	switch (event) {
1739 	case BFA_IOIM_SM_QRESUME:
1740 		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1741 		bfa_ioim_send_ioreq(ioim);
1742 		break;
1743 
1744 	case BFA_IOIM_SM_ABORT:
1745 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1746 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1747 		bfa_ioim_move_to_comp_q(ioim);
1748 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1749 			      ioim);
1750 		break;
1751 
1752 	case BFA_IOIM_SM_CLEANUP:
1753 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1754 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1755 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1756 			      ioim);
1757 		bfa_ioim_notify_cleanup(ioim);
1758 		break;
1759 
1760 	case BFA_IOIM_SM_HWFAIL:
1761 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1762 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1763 		bfa_ioim_move_to_comp_q(ioim);
1764 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1765 			      ioim);
1766 		break;
1767 
1768 	default:
1769 		bfa_sm_fault(ioim->bfa, event);
1770 	}
1771 }
1772 
1773 /*
1774  * Active IO is being aborted, waiting for room in request CQ.
1775  */
1776 static void
1777 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1778 {
1779 	bfa_trc(ioim->bfa, ioim->iotag);
1780 	bfa_trc(ioim->bfa, event);
1781 
1782 	switch (event) {
1783 	case BFA_IOIM_SM_QRESUME:
1784 		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1785 		bfa_ioim_send_abort(ioim);
1786 		break;
1787 
1788 	case BFA_IOIM_SM_CLEANUP:
1789 		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1790 		ioim->iosp->abort_explicit = BFA_FALSE;
1791 		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1792 		break;
1793 
1794 	case BFA_IOIM_SM_COMP_GOOD:
1795 	case BFA_IOIM_SM_COMP:
1796 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1797 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1798 		bfa_ioim_move_to_comp_q(ioim);
1799 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1800 			      ioim);
1801 		break;
1802 
1803 	case BFA_IOIM_SM_DONE:
1804 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1805 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1806 		bfa_ioim_move_to_comp_q(ioim);
1807 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1808 			      ioim);
1809 		break;
1810 
1811 	case BFA_IOIM_SM_HWFAIL:
1812 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1813 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1814 		bfa_ioim_move_to_comp_q(ioim);
1815 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1816 			      ioim);
1817 		break;
1818 
1819 	default:
1820 		bfa_sm_fault(ioim->bfa, event);
1821 	}
1822 }
1823 
1824 /*
1825  * Active IO is being cleaned up, waiting for room in request CQ.
1826  */
1827 static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s * ioim,enum bfa_ioim_event event)1828 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1829 {
1830 	bfa_trc(ioim->bfa, ioim->iotag);
1831 	bfa_trc(ioim->bfa, event);
1832 
1833 	switch (event) {
1834 	case BFA_IOIM_SM_QRESUME:
1835 		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1836 		bfa_ioim_send_abort(ioim);
1837 		break;
1838 
1839 	case BFA_IOIM_SM_ABORT:
1840 		/*
1841 		 * IO is already being cleaned up implicitly
1842 		 */
1843 		ioim->io_cbfn = __bfa_cb_ioim_abort;
1844 		break;
1845 
1846 	case BFA_IOIM_SM_COMP_GOOD:
1847 	case BFA_IOIM_SM_COMP:
1848 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1849 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1850 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1851 		bfa_ioim_notify_cleanup(ioim);
1852 		break;
1853 
1854 	case BFA_IOIM_SM_DONE:
1855 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1856 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1857 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1858 		bfa_ioim_notify_cleanup(ioim);
1859 		break;
1860 
1861 	case BFA_IOIM_SM_HWFAIL:
1862 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1863 		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1864 		bfa_ioim_move_to_comp_q(ioim);
1865 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1866 			      ioim);
1867 		break;
1868 
1869 	default:
1870 		bfa_sm_fault(ioim->bfa, event);
1871 	}
1872 }
1873 
1874 /*
1875  * IO bfa callback is pending.
1876  */
1877 static void
bfa_ioim_sm_hcb(struct bfa_ioim_s * ioim,enum bfa_ioim_event event)1878 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1879 {
1880 	switch (event) {
1881 	case BFA_IOIM_SM_HCB:
1882 		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
1883 		bfa_ioim_free(ioim);
1884 		break;
1885 
1886 	case BFA_IOIM_SM_CLEANUP:
1887 		bfa_ioim_notify_cleanup(ioim);
1888 		break;
1889 
1890 	case BFA_IOIM_SM_HWFAIL:
1891 		break;
1892 
1893 	default:
1894 		bfa_sm_fault(ioim->bfa, event);
1895 	}
1896 }
1897 
1898 /*
1899  * IO bfa callback is pending. IO resource cannot be freed.
1900  */
1901 static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s * ioim,enum bfa_ioim_event event)1902 bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1903 {
1904 	bfa_trc(ioim->bfa, ioim->iotag);
1905 	bfa_trc(ioim->bfa, event);
1906 
1907 	switch (event) {
1908 	case BFA_IOIM_SM_HCB:
1909 		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
1910 		list_del(&ioim->qe);
1911 		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
1912 		break;
1913 
1914 	case BFA_IOIM_SM_FREE:
1915 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1916 		break;
1917 
1918 	case BFA_IOIM_SM_CLEANUP:
1919 		bfa_ioim_notify_cleanup(ioim);
1920 		break;
1921 
1922 	case BFA_IOIM_SM_HWFAIL:
1923 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1924 		break;
1925 
1926 	default:
1927 		bfa_sm_fault(ioim->bfa, event);
1928 	}
1929 }

/*
 * IO is completed, waiting for resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}


static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}
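
/*
 * Editorial note on the __bfa_cb_ioim_*() helpers in this file: each is
 * scheduled through bfa_cb_queue() and receives a 'complete' flag from the
 * callback queue machinery. With BFA_TRUE the completion is delivered to
 * the driver layer (bfa_cb_ioim_*); with BFA_FALSE delivery is skipped and
 * the state machine is simply advanced with BFA_IOIM_SM_HCB so the IO
 * resources can be reclaimed.
 */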

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s	*ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			  0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			  0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/*
 * Send I/O request to firmware.
 */
static	bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through - bidirectional IO uses the generic opcode */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
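
/*
 * Editorial sketch of the SG layout built above, derived from the code:
 * the first data element is carried inline in the request (sges[0]); when
 * nsges > BFI_SGE_INLINE, sges[1] carries the inline data length and the
 * address of the first chained SG page. Each SG page then holds up to
 * BFI_SGPG_DATA_SGES data elements and is terminated either by a
 * BFI_SGE_LINK element pointing at the next page or, on the last page, by
 * a BFI_SGE_PGDLEN element carrying the cumulative length (pgcumsz).
 */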

/*
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}

/*
 * Send I/O abort request to firmware.
 */
static	bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i	msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = cpu_to_be16(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
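
/*
 * Editorial note: abort_explicit selects between the two opcodes above.
 * BFI_IOIM_H2I_IOABORT_REQ is used for a host-initiated abort of a single
 * IO; BFI_IOIM_H2I_IOCLEANUP_REQ is used for the implicit abort issued
 * while a TM command or ITN offline cleans up the IO (the qfull state
 * handlers clear abort_explicit on BFA_IOIM_SM_CLEANUP).
 */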

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}


static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_wc_down(&ioim->iosp->tskim->wc);
}

static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))		||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))	||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}

void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If the path TOV timer expired, fail back with PATHTOV status -
	 * these IO requests are not normally retried by the IO stack.
	 *
	 * Otherwise the device came back online; fail with normal failed
	 * status so that the IO stack retries these IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}


/*
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;
	u8			*snsinfo;
	u32		snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
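
/*
 * Editorial sketch of the carve-out performed by bfa_ioim_attach() above:
 *
 *	kva: ioim_arr    (num_ioim_reqs * sizeof(struct bfa_ioim_s))
 *	kva: ioim_sp_arr (num_ioim_reqs * sizeof(struct bfa_ioim_sp_s))
 *	dma: snsbase     (num_ioim_reqs * BFI_IOIM_SNSLEN sense buffers)
 *
 * IOIM i is wired to iosp[i] and to the i-th BFI_IOIM_SNSLEN slice of the
 * DMA-able sense buffer, then parked on ioim_free_q in the uninit state.
 */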

void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(ioim->iotag != iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fall through - a timed out IO is completed as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		WARN_ON(!rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		WARN_ON(rsp->reuse_io_tag != 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		WARN_ON(1);
	}

	bfa_sm_send_event(ioim, evt);
}
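
/*
 * Editorial note on reuse_io_tag, as used above: when firmware reports
 * reuse_io_tag == 0 the IO tag stays owned by firmware until a separate
 * BFI_IOIM_STS_RES_FREE response arrives (the BFA_IOIM_SM_DONE path);
 * when it is non-zero the tag is immediately reusable (the
 * BFA_IOIM_SM_COMP path).
 */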

void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);

	bfa_ioim_cb_profile_comp(fcpim, ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}

/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}


/*
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}
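
/*
 * Typical caller flow (editorial sketch; the SCSI queuecommand path in
 * bfad_im.c is believed to follow this shape, details elided):
 *
 *	ioim = bfa_ioim_alloc(&bfad->bfa, dio, itnim, sg_cnt);
 *	if (!ioim)
 *		return SCSI_MLQUEUE_HOST_BUSY;	(out of IO tags)
 *	bfa_ioim_start(ioim);
 */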

void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	ioim->iotag &= BFA_IOIM_IOTAG_MASK;
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}

void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}

/*
 * Driver I/O abort request.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{

	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
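
/*
 * Editorial note: BFA_STATUS_FAILED from bfa_ioim_abort() means the IO is
 * past the point of being abortable (see bfa_ioim_is_abortable() above).
 * The caller, believed to be the Linux eh_abort_handler path in bfad_im.c,
 * must then fall back to wider-scope recovery such as a LUN or target
 * reset.
 */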

/*
 *  BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * Task management command is active, awaiting room in request CQ
 * to send cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}

static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
				BFI_TSKIM_STS_FAILED);
}

static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return !memcmp(&tskim->lun, &lun, sizeof(lun));

	default:
		WARN_ON(1);
	}

	return BFA_FALSE;
}

/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}

/*
 * Clean up IO requests gathered in TM command scope.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}

/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/*
 * Send an abort request to firmware to clean up an active TM command.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s	*itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s	*m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/*
 * Call to resume a task management command waiting for room in the
 * request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/*
 * Clean up IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head	*qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa	= fcpim->bfa;
		tskim->fcpim	= fcpim;
		tskim->notify  = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
					tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}

void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}


struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	t_secs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
			struct scsi_lun lun,
			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim	= itnim;
	tskim->lun	= lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs	= tsecs;
	tskim->notify  = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
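
/*
 * Typical caller flow (editorial sketch; the reset handlers in bfad_im.c
 * are believed to follow this shape; the timeout variable shown is
 * illustrative):
 *
 *	tskim = bfa_tskim_alloc(&bfad->bfa, dtsk);
 *	if (tskim) {
 *		int_to_scsilun(cmnd->device->lun, &scsilun);
 *		bfa_tskim_start(tskim, itnim, scsilun,
 *				FCP_TM_LUN_RESET, tmo_secs);
 *	}
 */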