[SCSI] bfa: remove unused and empty functions
[linux-2.6-block.git] / drivers / scsi / bfa / bfa_fcpim.c
CommitLineData
7725ccfd 1/*
a36c61f9 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
7725ccfd
JH
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
a36c61f9
KG
18#include "bfa_modules.h"
19#include "bfa_cb_ioim.h"
7725ccfd
JH
20
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
a36c61f9
KG
24
25#define bfa_fcpim_add_iostats(__l, __r, __stats) \
26 (__l->__stats += __r->__stats)
27
28
5fbe25c7 29/*
a36c61f9
KG
30 * BFA ITNIM Related definitions
31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
33
34#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
35 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
36
37#define bfa_fcpim_additn(__itnim) \
38 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
39#define bfa_fcpim_delitn(__itnim) do { \
40 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
41 bfa_itnim_update_del_itn_stats(__itnim); \
42 list_del(&(__itnim)->qe); \
43 bfa_assert(list_empty(&(__itnim)->io_q)); \
44 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
45 bfa_assert(list_empty(&(__itnim)->pending_q)); \
46} while (0)
47
48#define bfa_itnim_online_cb(__itnim) do { \
49 if ((__itnim)->bfa->fcs) \
50 bfa_cb_itnim_online((__itnim)->ditn); \
51 else { \
52 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
53 __bfa_cb_itnim_online, (__itnim)); \
54 } \
55} while (0)
56
57#define bfa_itnim_offline_cb(__itnim) do { \
58 if ((__itnim)->bfa->fcs) \
59 bfa_cb_itnim_offline((__itnim)->ditn); \
60 else { \
61 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
62 __bfa_cb_itnim_offline, (__itnim)); \
63 } \
64} while (0)
65
66#define bfa_itnim_sler_cb(__itnim) do { \
67 if ((__itnim)->bfa->fcs) \
68 bfa_cb_itnim_sler((__itnim)->ditn); \
69 else { \
70 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
71 __bfa_cb_itnim_sler, (__itnim)); \
72 } \
73} while (0)
74
5fbe25c7 75/*
a36c61f9
KG
76 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
80enum bfa_itnim_event {
81 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
82 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
83 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
84 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
85 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
86 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
87 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
88 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
89 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
90};
91
5fbe25c7 92/*
a36c61f9
KG
93 * BFA IOIM related definitions
94 */
95#define bfa_ioim_move_to_comp_q(__ioim) do { \
96 list_del(&(__ioim)->qe); \
97 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
98} while (0)
99
100
101#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
102 if ((__fcpim)->profile_comp) \
103 (__fcpim)->profile_comp(__ioim); \
104} while (0)
105
106#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
107 if ((__fcpim)->profile_start) \
108 (__fcpim)->profile_start(__ioim); \
109} while (0)
a36c61f9 110
5fbe25c7 111/*
a36c61f9
KG
112 * IO state machine events
113 */
114enum bfa_ioim_event {
115 BFA_IOIM_SM_START = 1, /* io start request from host */
116 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
117 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
118 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
119 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
120 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
121 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
122 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
123 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
124 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
125 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
126 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
127 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
128 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
129 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
130 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
131 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
132 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
133};
134
135
5fbe25c7 136/*
a36c61f9
KG
137 * BFA TSKIM related definitions
138 */
139
5fbe25c7 140/*
a36c61f9
KG
141 * task management completion handling
142 */
143#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
144 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
145 bfa_tskim_notify_comp(__tskim); \
146} while (0)
147
148#define bfa_tskim_notify_comp(__tskim) do { \
149 if ((__tskim)->notify) \
150 bfa_itnim_tskdone((__tskim)->itnim); \
151} while (0)
152
153
154enum bfa_tskim_event {
155 BFA_TSKIM_SM_START = 1, /* TM command start */
156 BFA_TSKIM_SM_DONE = 2, /* TM completion */
157 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
158 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
159 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
160 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
161 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
162 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
163};
164
5fbe25c7 165/*
a36c61f9
KG
166 * forward declaration for BFA ITNIM functions
167 */
168static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
169static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
170static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
171static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
172static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
173static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
174static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
175static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
176static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
177static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
178static void bfa_itnim_iotov(void *itnim_arg);
179static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
182
5fbe25c7 183/*
a36c61f9
KG
184 * forward declaration of ITNIM state machine
185 */
186static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
187 enum bfa_itnim_event event);
188static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
189 enum bfa_itnim_event event);
190static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
191 enum bfa_itnim_event event);
192static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
193 enum bfa_itnim_event event);
194static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
195 enum bfa_itnim_event event);
196static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
197 enum bfa_itnim_event event);
198static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
199 enum bfa_itnim_event event);
200static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
201 enum bfa_itnim_event event);
202static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
203 enum bfa_itnim_event event);
204static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
205 enum bfa_itnim_event event);
206static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
207 enum bfa_itnim_event event);
208static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
209 enum bfa_itnim_event event);
210static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
211 enum bfa_itnim_event event);
212static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
213 enum bfa_itnim_event event);
214static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
215 enum bfa_itnim_event event);
216
5fbe25c7 217/*
a36c61f9
KG
218 * forward declaration for BFA IOIM functions
219 */
220static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
221static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
222static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
223static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
224static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
225static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
226static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
227static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
228static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
229static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
230static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
231
232
5fbe25c7 233/*
a36c61f9
KG
234 * forward declaration of BFA IO state machine
235 */
236static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
237 enum bfa_ioim_event event);
238static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
239 enum bfa_ioim_event event);
240static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
241 enum bfa_ioim_event event);
242static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
243 enum bfa_ioim_event event);
244static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
245 enum bfa_ioim_event event);
246static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
247 enum bfa_ioim_event event);
248static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
249 enum bfa_ioim_event event);
250static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
251 enum bfa_ioim_event event);
252static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
253 enum bfa_ioim_event event);
254static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
255 enum bfa_ioim_event event);
256static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
257 enum bfa_ioim_event event);
258static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
259 enum bfa_ioim_event event);
260
5fbe25c7 261/*
a36c61f9
KG
262 * forward declaration for BFA TSKIM functions
263 */
264static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
265static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
266static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
267 lun_t lun);
268static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
269static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
270static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
271static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
272static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
273static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
274
275
5fbe25c7 276/*
a36c61f9
KG
277 * forward declaration of BFA TSKIM state machine
278 */
279static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
280 enum bfa_tskim_event event);
281static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
282 enum bfa_tskim_event event);
283static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
284 enum bfa_tskim_event event);
285static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
286 enum bfa_tskim_event event);
287static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
288 enum bfa_tskim_event event);
289static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
290 enum bfa_tskim_event event);
291static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
292 enum bfa_tskim_event event);
293
5fbe25c7 294/*
df0f1933 295 * BFA FCP Initiator Mode module
7725ccfd
JH
296 */
297
5fbe25c7 298/*
a36c61f9 299 * Compute and return memory needed by FCP(im) module.
7725ccfd
JH
300 */
301static void
302bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
303 u32 *dm_len)
304{
305 bfa_itnim_meminfo(cfg, km_len, dm_len);
306
5fbe25c7 307 /*
7725ccfd
JH
308 * IO memory
309 */
310 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
311 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
312 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
313 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
314
315 *km_len += cfg->fwcfg.num_ioim_reqs *
316 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
317
318 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
319
5fbe25c7 320 /*
7725ccfd
JH
321 * task management command memory
322 */
323 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
324 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
325 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
326}
327
328
/*
 * Attach-time initialization of the FCP(im) module.
 *
 * Caches the firmware/driver configuration limits in the module state and
 * carves the pre-computed memory (see bfa_fcpim_meminfo()) between the
 * ITNIM, TSKIM and IOIM sub-modules.
 */
static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	/* IO profiling is off until bfa_fcpim_profile_on() installs hooks. */
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	/* Sub-module attach order: itnim, tskim, ioim. */
	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}
353
7725ccfd
JH
/*
 * Module detach hook (BFA_MODULE vtable). Nothing to tear down; the
 * sub-modules' memory is owned by the hal meminfo framework.
 */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}
358
/* Module start hook (BFA_MODULE vtable) - intentionally empty. */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
363
/* Module stop hook (BFA_MODULE vtable) - intentionally empty. */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
368
369static void
370bfa_fcpim_iocdisable(struct bfa_s *bfa)
371{
372 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
373 struct bfa_itnim_s *itnim;
a36c61f9 374 struct list_head *qe, *qen;
7725ccfd
JH
375
376 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
377 itnim = (struct bfa_itnim_s *) qe;
378 bfa_itnim_iocdisable(itnim);
379 }
380}
381
a36c61f9
KG
/*
 * Accumulate the per-itnim IO statistics in @rstats into @lstats,
 * field by field (lstats->f += rstats->f via bfa_fcpim_add_iostats).
 */
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	/* IO path counters */
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	/* itnim lifecycle counters */
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	/* task-management counters */
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	/* throughput counters */
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}
431
7725ccfd
JH
/*
 * Set the path timeout value.  @path_tov is given in seconds; it is
 * stored in milliseconds and capped at BFA_FCPIM_PATHTOV_MAX.
 */
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
441
442u16
443bfa_fcpim_path_tov_get(struct bfa_s *bfa)
444{
445 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
446
f8ceafde 447 return fcpim->path_tov / 1000;
7725ccfd
JH
448}
449
450bfa_status_t
a36c61f9
KG
451bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
452 u8 lp_tag)
453{
454 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
455 struct list_head *qe, *qen;
456 struct bfa_itnim_s *itnim;
457
458 /* accumulate IO stats from itnim */
6a18b167 459 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
a36c61f9
KG
460 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
461 itnim = (struct bfa_itnim_s *) qe;
462 if (itnim->rport->rport_info.lp_tag != lp_tag)
463 continue;
464 bfa_fcpim_add_stats(stats, &(itnim->stats));
465 }
466 return BFA_STATUS_OK;
467}
468bfa_status_t
469bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
470{
471 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
472 struct list_head *qe, *qen;
473 struct bfa_itnim_s *itnim;
474
475 /* accumulate IO stats from itnim */
6a18b167 476 memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
a36c61f9
KG
477 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
478 itnim = (struct bfa_itnim_s *) qe;
479 bfa_fcpim_add_stats(modstats, &(itnim->stats));
480 }
481 return BFA_STATUS_OK;
482}
483
/*
 * Copy out the statistics accumulated from itnims that have since been
 * deleted (snapshotted by bfa_itnim_update_del_itn_stats() at delete
 * time).  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
	struct bfa_fcpim_del_itn_stats_s *modstats)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	*modstats = fcpim->del_itn_stats;

	return BFA_STATUS_OK;
}
494
495
496bfa_status_t
497bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
498{
499 struct bfa_itnim_s *itnim;
500 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
501 struct list_head *qe, *qen;
502
503 /* accumulate IO stats from itnim */
504 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
505 itnim = (struct bfa_itnim_s *) qe;
506 bfa_itnim_clear_stats(itnim);
507 }
508 fcpim->io_profile = BFA_TRUE;
509 fcpim->io_profile_start_time = time;
510 fcpim->profile_comp = bfa_ioim_profile_comp;
511 fcpim->profile_start = bfa_ioim_profile_start;
512
513 return BFA_STATUS_OK;
514}
515bfa_status_t
516bfa_fcpim_profile_off(struct bfa_s *bfa)
7725ccfd
JH
517{
518 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
a36c61f9
KG
519 fcpim->io_profile = BFA_FALSE;
520 fcpim->io_profile_start_time = 0;
521 fcpim->profile_comp = NULL;
522 fcpim->profile_start = NULL;
523 return BFA_STATUS_OK;
524}
7725ccfd 525
a36c61f9
KG
526bfa_status_t
527bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
528{
529 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
530 struct list_head *qe, *qen;
531 struct bfa_itnim_s *itnim;
7725ccfd 532
a36c61f9
KG
533 /* clear IO stats from all active itnims */
534 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
535 itnim = (struct bfa_itnim_s *) qe;
536 if (itnim->rport->rport_info.lp_tag != lp_tag)
537 continue;
538 bfa_itnim_clear_stats(itnim);
539 }
7725ccfd 540 return BFA_STATUS_OK;
a36c61f9 541
7725ccfd
JH
542}
543
544bfa_status_t
545bfa_fcpim_clr_modstats(struct bfa_s *bfa)
546{
547 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
a36c61f9
KG
548 struct list_head *qe, *qen;
549 struct bfa_itnim_s *itnim;
7725ccfd 550
a36c61f9
KG
551 /* clear IO stats from all active itnims */
552 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
553 itnim = (struct bfa_itnim_s *) qe;
554 bfa_itnim_clear_stats(itnim);
555 }
6a18b167 556 memset(&fcpim->del_itn_stats, 0,
a36c61f9 557 sizeof(struct bfa_fcpim_del_itn_stats_s));
7725ccfd
JH
558
559 return BFA_STATUS_OK;
560}
561
/*
 * Set the IO queue depth.  @q_depth must not exceed
 * BFA_IOCFC_QDEPTH_MAX (asserted).
 */
void
bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);

	fcpim->q_depth = q_depth;
}
571
/* Return the configured IO queue depth. */
u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->q_depth;
}
579
36d345a7
JH
/*
 * Recompute the IO-redirection setting from the QoS state.
 *
 * NOTE(review): the computed value is a dead store - `ioredirect` is
 * never written back to the module state (compare
 * bfa_fcpim_set_ioredirect() below), so this function currently has no
 * observable effect beyond the bfa_fcport_is_qos_enabled() query.
 * Either the assignment to fcpim->ioredirect is missing or the function
 * is vestigial - confirm intent before relying on it.
 */
void
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
{
	bfa_boolean_t ioredirect;

	/*
	 * IO redirection is turned off when QoS is enabled and vice versa
	 */
	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
}
590
/* Explicitly set the IO-redirection state in the module. */
void
bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	fcpim->ioredirect = state;
}
a36c61f9
KG
597
598
599
5fbe25c7 600/*
a36c61f9
KG
601 * BFA ITNIM module state machine functions
602 */
603
/*
 * Beginning/unallocated state - no events expected.
 *
 * CREATE -> created: mark offline and enqueue on the module itnim list.
 * Any other event is a state-machine fault.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
624
/*
 * Beginning state, only online event expected.
 *
 * ONLINE  -> fwcreate (or fwcreate_qfull if the request queue is full).
 * DELETE  -> uninit: remove from the module list.
 * HWFAIL  -> iocdisable.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
655
/*
 * Waiting for itnim create response from firmware.
 *
 * FWRSP   -> online: mark online, restart path-TOV bookkeeping and
 *            deliver the online callback.
 * DELETE  -> delete_pending: the f/w response must arrive first.
 * OFFLINE -> fwdelete (or fwdelete_qfull on queue-full).
 * HWFAIL  -> iocdisable.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
692
/*
 * Firmware-create deferred: waiting for request queue space.
 * Every exit path other than QRESUME must cancel the pending
 * request-queue wait element before leaving this state.
 *
 * QRESUME -> fwcreate: retry the create now that space is available.
 * DELETE  -> uninit (cancel wait, remove from module list).
 * OFFLINE -> offline (cancel wait, offline callback).
 * HWFAIL  -> iocdisable (cancel wait).
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
727
/*
 * Waiting for itnim create response from firmware, a delete is pending.
 *
 * FWRSP  -> deleting (or deleting_qfull): immediately issue the
 *           firmware delete that was deferred behind the create.
 * HWFAIL -> uninit: no firmware to talk to; finish the delete locally.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
755
/*
 * Online state - normal parking state.
 *
 * Every exit marks the itnim offline (is_online = BFA_FALSE) before
 * acting:
 * OFFLINE -> cleanup_offline: start IO-TOV timer, clean up active IOs.
 * DELETE  -> cleanup_delete: clean up active IOs (no IO-TOV - the itnim
 *            is going away).
 * SLER    -> sler: second-level error recovery callback.
 * HWFAIL  -> iocdisable: abort all IOs locally.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
797
/*
 * Second level error recovery need.
 *
 * OFFLINE -> cleanup_offline: clean up active IOs.
 * DELETE  -> cleanup_delete: clean up IOs and tear down IO-TOV state.
 * HWFAIL  -> iocdisable: abort all IOs locally.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
828
/*
 * Going offline. Waiting for active IO cleanup.
 *
 * CLEANUP -> fwdelete (or fwdelete_qfull): IOs are done, tell firmware.
 * DELETE  -> cleanup_delete: upgrade the offline to a delete.
 * HWFAIL  -> iocdisable: abort IOs locally and deliver offline cb.
 * SLER    -> ignored (already cleaning up).
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
865
/*
 * Deleting itnim. Waiting for active IO cleanup.
 *
 * CLEANUP -> deleting (or deleting_qfull): IOs done, issue f/w delete.
 * HWFAIL  -> iocdisable: abort IOs locally.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
893
/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 *
 * FWRSP  -> offline: delete acknowledged, deliver offline callback.
 * DELETE -> deleting: the pending offline becomes a full delete.
 * HWFAIL -> iocdisable: stop waiting, deliver offline callback.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
922
/*
 * Firmware-delete deferred: waiting for request queue space.
 *
 * QRESUME -> fwdelete: retry the delete now that space is available.
 * DELETE  -> deleting_qfull: still queue-full, but now a full delete.
 * HWFAIL  -> iocdisable: cancel the queue wait, offline callback.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
950
/*
 * Offline state.
 *
 * DELETE -> uninit: tear down IO-TOV state and remove from the module.
 * ONLINE -> fwcreate (or fwcreate_qfull): rport came back.
 * HWFAIL -> iocdisable.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
982
/*
 * IOC h/w failed state.
 *
 * DELETE  -> uninit: finish the delete locally (no firmware).
 * OFFLINE -> stay here, just deliver the offline callback.
 * ONLINE  -> fwcreate (or fwcreate_qfull): IOC recovered.
 * HWFAIL  -> ignored (already here).
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1018
/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Delete completed by firmware, or moot on IOC failure. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1039
/*
 * Itnim is deleted; the fw delete is pending on request-queue space.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC failed: cancel the queue wait and free the itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
1063
/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	/* Fail all active task management commands. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	/* Fail all active IO requests. */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* IOs already moved to the cleanup queue also fail. */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
1097
/*
 * IO cleanup completion: invoked (via the wait counter) once every IO
 * and TM queued by bfa_itnim_cleanup() has finished.
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
1109
/*
 * Initiate cleanup of all IOs.
 *
 * A wait counter tracks every outstanding IO/TM cleanup; when the last
 * one completes, bfa_itnim_cleanp_comp() fires the CLEANUP event.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* Drop the initial reference; completes now if nothing was queued. */
	bfa_wc_wait(&itnim->wc);
}
1144
1145static void
1146__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
1147{
1148 struct bfa_itnim_s *itnim = cbarg;
1149
1150 if (complete)
1151 bfa_cb_itnim_online(itnim->ditn);
1152}
1153
1154static void
1155__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
1156{
1157 struct bfa_itnim_s *itnim = cbarg;
1158
1159 if (complete)
1160 bfa_cb_itnim_offline(itnim->ditn);
1161}
1162
1163static void
1164__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1165{
1166 struct bfa_itnim_s *itnim = cbarg;
1167
1168 if (complete)
1169 bfa_cb_itnim_sler(itnim->ditn);
1170}
1171
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
1182
1183
1184
1185
/*
 * bfa_itnim_public
 */

/*
 * One IO cleanup finished; drop a reference on the itnim wait counter.
 */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1195
/*
 * One TM cleanup finished; drop a reference on the itnim wait counter.
 */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
1201
/*
 * Account kernel-virtual memory needed for the itnim array (one entry
 * per rport).  dm_len (DMA memory) is unchanged: itnims need none.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
1211
/*
 * Module attach: carve the itnim array out of the kva chunk reserved by
 * bfa_itnim_meminfo() and initialize every entry in the uninit state.
 */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* Latency minimums start at all-ones so any sample lowers them. */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* Advance the kva cursor past the consumed itnim array. */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1245
/*
 * IOC failure notification: drive this itnim's state machine into the
 * IOC-disabled state.
 */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1252
/*
 * Post an ITNIM create request to firmware.  Returns BFA_FALSE and
 * enqueues a queue-wait callback if the request queue is full.
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1283
/*
 * Post an ITNIM delete request to firmware.  Returns BFA_FALSE and
 * enqueues a queue-wait callback if the request queue is full.
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1309
5fbe25c7 1310/*
a36c61f9
KG
1311 * Cleanup all pending failed inflight requests.
1312 */
1313static void
1314bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1315{
1316 struct bfa_ioim_s *ioim;
1317 struct list_head *qe, *qen;
1318
1319 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1320 ioim = (struct bfa_ioim_s *)qe;
1321 bfa_ioim_delayed_comp(ioim, iotov);
1322 }
1323}
1324
/*
 * Start all pending IO requests.
 *
 * Called when the itnim comes back online within the path TOV window.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1349
/*
 * Fail all pending IO requests.
 *
 * Called when the path TOV expires (or the itnim is deleted) before the
 * remote port returned.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1372
/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* Timer has fired; mark it inactive before the cleanup callbacks. */
	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1387
/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 * A path_tov of zero disables the hold-IO behavior entirely.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		bfa_assert(bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1402
/*
 * Stop IO TOV timer, if it is running.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}
1414
/*
 * Stop IO TOV timer and fail any IOs still held; issue the tov_begin/tov
 * driver callbacks only if the timer was actually running.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	/* Remember whether the timer was armed before stopping it. */
	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
1433
/*
 * Fold this itnim's statistics into the module-wide deleted-itnim
 * aggregates before the itnim is freed (called from bfa_fcpim_delitn).
 */
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1452
1453
1454
/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing: dispatch firmware-to-host itnim messages
 * (create/delete responses and SLER events) to the owning itnim's
 * state machine.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* Unknown message id from firmware -- log and assert. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1502
1503
1504
/*
 * bfa_itnim_api
 */

/*
 * Bind the pre-allocated itnim slot for this rport to the driver's
 * itnim object (ditn) and fire the CREATE event.
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1525
/*
 * Delete an itnim; actual teardown is driven by the state machine.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1532
/*
 * Bring an itnim online; seq_rec selects sequence-level error recovery
 * in the subsequent firmware create request.
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1540
/*
 * Take an itnim offline; IO cleanup is driven by the state machine.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1547
5fbe25c7 1548/*
a36c61f9
KG
1549 * Return true if itnim is considered offline for holding off IO request.
1550 * IO is not held if itnim is being deleted.
1551 */
1552bfa_boolean_t
1553bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1554{
1555 return itnim->fcpim->path_tov && itnim->iotov_active &&
1556 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1557 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1558 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1559 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1560 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1561 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1562}
1563
/*
 * Snapshot this itnim's IO latency profile into *ioprofile.
 * Fails with BFA_STATUS_IOPROFILE_OFF when profiling is disabled.
 */
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
		struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}
1581
a36c61f9
KG
1582void
1583bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1584{
1585 int j;
6a18b167
JH
1586 memset(&itnim->stats, 0, sizeof(itnim->stats));
1587 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
a36c61f9
KG
1588 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1589 itnim->ioprofile.io_latency.min[j] = ~0;
1590}
1591
/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* Target gone and not held: fail with path TOV. */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* Held for path TOV: park on the pending queue. */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* Large scatter-gather lists need SG pages allocated first. */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1660
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages arrived; try to post the IO request. */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cancel the SG-page wait and fail the IO back to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1707
/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Completed, but firmware still owns the IO tag (hcb_free). */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Host-initiated abort: explicit, completes via abort cb. */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Implicit abort on itnim cleanup: completes as failure. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1791
/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Itnim cleanup overrides the retry: implicit abort. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * In this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1845
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completion racing with the abort: wait for abort rsp. */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Explicit abort superseded by itnim cleanup: re-abort implicitly. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1907
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completion racing with cleanup: wait for abort rsp. */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1968
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available; post the IO request now. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2012
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available; post the deferred abort. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Downgrade explicit abort to implicit cleanup abort. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2063
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available; post the deferred cleanup abort. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2113
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Host callback delivered; IO can be recycled. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2140
/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Host callback done first; wait for the fw resource free. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* Fw resource freed first; wait for the host callback. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2173
/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2200
2201
a36c61f9
KG
2202static void
2203__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2204{
2205 struct bfa_ioim_s *ioim = cbarg;
2206
2207 if (!complete) {
2208 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2209 return;
2210 }
2211
2212 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2213}
2214
2215static void
2216__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2217{
2218 struct bfa_ioim_s *ioim = cbarg;
2219 struct bfi_ioim_rsp_s *m;
2220 u8 *snsinfo = NULL;
2221 u8 sns_len = 0;
2222 s32 residue = 0;
2223
2224 if (!complete) {
2225 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2226 return;
2227 }
2228
2229 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2230 if (m->io_status == BFI_IOIM_STS_OK) {
5fbe25c7 2231 /*
a36c61f9
KG
2232 * setup sense information, if present
2233 */
2234 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2235 m->sns_len) {
2236 sns_len = m->sns_len;
2237 snsinfo = ioim->iosp->snsinfo;
2238 }
2239
5fbe25c7 2240 /*
a36c61f9
KG
2241 * setup residue value correctly for normal completions
2242 */
2243 if (m->resid_flags == FCP_RESID_UNDER) {
ba816ea8 2244 residue = be32_to_cpu(m->residue);
a36c61f9
KG
2245 bfa_stats(ioim->itnim, iocomp_underrun);
2246 }
2247 if (m->resid_flags == FCP_RESID_OVER) {
ba816ea8 2248 residue = be32_to_cpu(m->residue);
a36c61f9
KG
2249 residue = -residue;
2250 bfa_stats(ioim->itnim, iocomp_overrun);
2251 }
2252 }
2253
2254 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2255 m->scsi_status, sns_len, snsinfo, residue);
2256}
2257
2258static void
2259__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2260{
2261 struct bfa_ioim_s *ioim = cbarg;
2262
2263 if (!complete) {
2264 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2265 return;
2266 }
2267
2268 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2269 0, 0, NULL, 0);
2270}
2271
2272static void
2273__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2274{
2275 struct bfa_ioim_s *ioim = cbarg;
2276
2277 bfa_stats(ioim->itnim, path_tov_expired);
2278 if (!complete) {
2279 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2280 return;
2281 }
2282
2283 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2284 0, 0, NULL, 0);
2285}
2286
2287static void
2288__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2289{
2290 struct bfa_ioim_s *ioim = cbarg;
2291
2292 if (!complete) {
2293 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2294 return;
2295 }
2296
2297 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2298}
2299
2300static void
2301bfa_ioim_sgpg_alloced(void *cbarg)
2302{
2303 struct bfa_ioim_s *ioim = cbarg;
2304
2305 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2306 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2307 bfa_ioim_sgpg_setup(ioim);
2308 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2309}
2310
5fbe25c7 2311/*
a36c61f9
KG
2312 * Send I/O request to firmware.
2313 */
2314static bfa_boolean_t
2315bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2316{
2317 struct bfa_itnim_s *itnim = ioim->itnim;
2318 struct bfi_ioim_req_s *m;
2319 static struct fcp_cmnd_s cmnd_z0 = { 0 };
2320 struct bfi_sge_s *sge;
2321 u32 pgdlen = 0;
2322 u32 fcp_dl;
2323 u64 addr;
2324 struct scatterlist *sg;
2325 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2326
5fbe25c7 2327 /*
a36c61f9
KG
2328 * check for room in queue to send request now
2329 */
2330 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2331 if (!m) {
2332 bfa_stats(ioim->itnim, qwait);
2333 bfa_reqq_wait(ioim->bfa, ioim->reqq,
2334 &ioim->iosp->reqq_wait);
2335 return BFA_FALSE;
2336 }
2337
5fbe25c7 2338 /*
a36c61f9
KG
2339 * build i/o request message next
2340 */
ba816ea8 2341 m->io_tag = cpu_to_be16(ioim->iotag);
a36c61f9
KG
2342 m->rport_hdl = ioim->itnim->rport->fw_handle;
2343 m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
2344
5fbe25c7 2345 /*
a36c61f9
KG
2346 * build inline IO SG element here
2347 */
2348 sge = &m->sges[0];
2349 if (ioim->nsges) {
2350 sg = (struct scatterlist *)scsi_sglist(cmnd);
2351 addr = bfa_os_sgaddr(sg_dma_address(sg));
2352 sge->sga = *(union bfi_addr_u *) &addr;
2353 pgdlen = sg_dma_len(sg);
2354 sge->sg_len = pgdlen;
2355 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2356 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2357 bfa_sge_to_be(sge);
2358 sge++;
2359 }
2360
2361 if (ioim->nsges > BFI_SGE_INLINE) {
2362 sge->sga = ioim->sgpg->sgpg_pa;
2363 } else {
2364 sge->sga.a32.addr_lo = 0;
2365 sge->sga.a32.addr_hi = 0;
2366 }
2367 sge->sg_len = pgdlen;
2368 sge->flags = BFI_SGE_PGDLEN;
2369 bfa_sge_to_be(sge);
2370
5fbe25c7 2371 /*
a36c61f9
KG
2372 * set up I/O command parameters
2373 */
6a18b167 2374 m->cmnd = cmnd_z0;
a36c61f9
KG
2375 m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
2376 m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
6a18b167 2377 m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
a36c61f9 2378 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
ba816ea8 2379 m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
a36c61f9 2380
5fbe25c7 2381 /*
a36c61f9
KG
2382 * set up I/O message header
2383 */
2384 switch (m->cmnd.iodir) {
2385 case FCP_IODIR_READ:
2386 bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
2387 bfa_stats(itnim, input_reqs);
2388 ioim->itnim->stats.rd_throughput += fcp_dl;
2389 break;
2390 case FCP_IODIR_WRITE:
2391 bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
2392 bfa_stats(itnim, output_reqs);
2393 ioim->itnim->stats.wr_throughput += fcp_dl;
2394 break;
2395 case FCP_IODIR_RW:
2396 bfa_stats(itnim, input_reqs);
2397 bfa_stats(itnim, output_reqs);
2398 default:
2399 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2400 }
2401 if (itnim->seq_rec ||
2402 (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
2403 bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
2404
5fbe25c7 2405 /*
a36c61f9
KG
2406 * queue I/O message to firmware
2407 */
2408 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2409 return BFA_TRUE;
2410}
2411
5fbe25c7 2412/*
a36c61f9
KG
2413 * Setup any additional SG pages needed.Inline SG element is setup
2414 * at queuing time.
2415 */
2416static bfa_boolean_t
2417bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
2418{
2419 u16 nsgpgs;
2420
2421 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
2422
5fbe25c7 2423 /*
a36c61f9
KG
2424 * allocate SG pages needed
2425 */
2426 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2427 if (!nsgpgs)
2428 return BFA_TRUE;
2429
2430 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2431 != BFA_STATUS_OK) {
2432 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2433 return BFA_FALSE;
2434 }
2435
2436 ioim->nsgpgs = nsgpgs;
2437 bfa_ioim_sgpg_setup(ioim);
2438
2439 return BFA_TRUE;
2440}
2441
2442static void
2443bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
2444{
2445 int sgeid, nsges, i;
2446 struct bfi_sge_s *sge;
2447 struct bfa_sgpg_s *sgpg;
2448 u32 pgcumsz;
2449 u64 addr;
2450 struct scatterlist *sg;
2451 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2452
2453 sgeid = BFI_SGE_INLINE;
2454 ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
2455
2456 sg = scsi_sglist(cmnd);
2457 sg = sg_next(sg);
2458
2459 do {
2460 sge = sgpg->sgpg->sges;
2461 nsges = ioim->nsges - sgeid;
2462 if (nsges > BFI_SGPG_DATA_SGES)
2463 nsges = BFI_SGPG_DATA_SGES;
2464
2465 pgcumsz = 0;
2466 for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
2467 addr = bfa_os_sgaddr(sg_dma_address(sg));
2468 sge->sga = *(union bfi_addr_u *) &addr;
2469 sge->sg_len = sg_dma_len(sg);
2470 pgcumsz += sge->sg_len;
2471
5fbe25c7 2472 /*
a36c61f9
KG
2473 * set flags
2474 */
2475 if (i < (nsges - 1))
2476 sge->flags = BFI_SGE_DATA;
2477 else if (sgeid < (ioim->nsges - 1))
2478 sge->flags = BFI_SGE_DATA_CPL;
2479 else
2480 sge->flags = BFI_SGE_DATA_LAST;
2481
2482 bfa_sge_to_le(sge);
2483 }
2484
2485 sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2486
5fbe25c7 2487 /*
a36c61f9
KG
2488 * set the link element of each page
2489 */
2490 if (sgeid == ioim->nsges) {
2491 sge->flags = BFI_SGE_PGDLEN;
2492 sge->sga.a32.addr_lo = 0;
2493 sge->sga.a32.addr_hi = 0;
2494 } else {
2495 sge->flags = BFI_SGE_LINK;
2496 sge->sga = sgpg->sgpg_pa;
2497 }
2498 sge->sg_len = pgcumsz;
2499
2500 bfa_sge_to_le(sge);
2501 } while (sgeid < ioim->nsges);
2502}
2503
5fbe25c7 2504/*
a36c61f9
KG
2505 * Send I/O abort request to firmware.
2506 */
2507static bfa_boolean_t
2508bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2509{
2510 struct bfi_ioim_abort_req_s *m;
2511 enum bfi_ioim_h2i msgop;
2512
5fbe25c7 2513 /*
a36c61f9
KG
2514 * check for room in queue to send request now
2515 */
2516 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2517 if (!m)
2518 return BFA_FALSE;
2519
5fbe25c7 2520 /*
a36c61f9
KG
2521 * build i/o request message next
2522 */
2523 if (ioim->iosp->abort_explicit)
2524 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2525 else
2526 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2527
2528 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
ba816ea8 2529 m->io_tag = cpu_to_be16(ioim->iotag);
a36c61f9
KG
2530 m->abort_tag = ++ioim->abort_tag;
2531
5fbe25c7 2532 /*
a36c61f9
KG
2533 * queue I/O message to firmware
2534 */
2535 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2536 return BFA_TRUE;
2537}
2538
5fbe25c7 2539/*
a36c61f9
KG
2540 * Call to resume any I/O requests waiting for room in request queue.
2541 */
2542static void
2543bfa_ioim_qresume(void *cbarg)
2544{
2545 struct bfa_ioim_s *ioim = cbarg;
2546
2547 bfa_stats(ioim->itnim, qresumes);
2548 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2549}
2550
2551
2552static void
2553bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2554{
5fbe25c7 2555 /*
a36c61f9
KG
2556 * Move IO from itnim queue to fcpim global queue since itnim will be
2557 * freed.
2558 */
2559 list_del(&ioim->qe);
2560 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2561
2562 if (!ioim->iosp->tskim) {
2563 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2564 bfa_cb_dequeue(&ioim->hcb_qe);
2565 list_del(&ioim->qe);
2566 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2567 }
2568 bfa_itnim_iodone(ioim->itnim);
2569 } else
2570 bfa_tskim_iodone(ioim->iosp->tskim);
2571}
2572
2573static bfa_boolean_t
2574bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2575{
2576 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2577 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2578 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2579 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2580 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2581 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2582 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2583 return BFA_FALSE;
2584
2585 return BFA_TRUE;
2586}
2587
5fbe25c7 2588/*
a36c61f9
KG
2589 * or after the link comes back.
2590 */
2591void
2592bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2593{
5fbe25c7 2594 /*
a36c61f9
KG
2595 * If path tov timer expired, failback with PATHTOV status - these
2596 * IO requests are not normally retried by IO stack.
2597 *
2598 * Otherwise device cameback online and fail it with normal failed
2599 * status so that IO stack retries these failed IO requests.
2600 */
2601 if (iotov)
2602 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2603 else {
2604 ioim->io_cbfn = __bfa_cb_ioim_failed;
2605 bfa_stats(ioim->itnim, iocom_nexus_abort);
2606 }
2607 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2608
5fbe25c7 2609 /*
a36c61f9
KG
2610 * Move IO to fcpim global queue since itnim will be
2611 * freed.
2612 */
2613 list_del(&ioim->qe);
2614 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2615}
2616
2617
5fbe25c7 2618/*
a36c61f9
KG
2619 * Memory allocation and initialization.
2620 */
2621void
2622bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
2623{
2624 struct bfa_ioim_s *ioim;
2625 struct bfa_ioim_sp_s *iosp;
2626 u16 i;
2627 u8 *snsinfo;
2628 u32 snsbufsz;
2629
5fbe25c7 2630 /*
a36c61f9
KG
2631 * claim memory first
2632 */
2633 ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
2634 fcpim->ioim_arr = ioim;
2635 bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
2636
2637 iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
2638 fcpim->ioim_sp_arr = iosp;
2639 bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
2640
5fbe25c7 2641 /*
a36c61f9
KG
2642 * Claim DMA memory for per IO sense data.
2643 */
2644 snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
2645 fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
2646 bfa_meminfo_dma_phys(minfo) += snsbufsz;
2647
2648 fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
2649 bfa_meminfo_dma_virt(minfo) += snsbufsz;
2650 snsinfo = fcpim->snsbase.kva;
2651 bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
2652
5fbe25c7 2653 /*
a36c61f9
KG
2654 * Initialize ioim free queues
2655 */
2656 INIT_LIST_HEAD(&fcpim->ioim_free_q);
2657 INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2658 INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2659
2660 for (i = 0; i < fcpim->num_ioim_reqs;
2661 i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
2662 /*
2663 * initialize IOIM
2664 */
6a18b167 2665 memset(ioim, 0, sizeof(struct bfa_ioim_s));
a36c61f9
KG
2666 ioim->iotag = i;
2667 ioim->bfa = fcpim->bfa;
2668 ioim->fcpim = fcpim;
2669 ioim->iosp = iosp;
2670 iosp->snsinfo = snsinfo;
2671 INIT_LIST_HEAD(&ioim->sgpg_q);
2672 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2673 bfa_ioim_qresume, ioim);
2674 bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2675 bfa_ioim_sgpg_alloced, ioim);
2676 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2677
2678 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2679 }
2680}
2681
a36c61f9
KG
2682void
2683bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2684{
2685 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2686 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2687 struct bfa_ioim_s *ioim;
2688 u16 iotag;
2689 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2690
ba816ea8 2691 iotag = be16_to_cpu(rsp->io_tag);
a36c61f9
KG
2692
2693 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2694 bfa_assert(ioim->iotag == iotag);
2695
2696 bfa_trc(ioim->bfa, ioim->iotag);
2697 bfa_trc(ioim->bfa, rsp->io_status);
2698 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2699
2700 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
6a18b167 2701 ioim->iosp->comp_rspmsg = *m;
a36c61f9
KG
2702
2703 switch (rsp->io_status) {
2704 case BFI_IOIM_STS_OK:
2705 bfa_stats(ioim->itnim, iocomp_ok);
2706 if (rsp->reuse_io_tag == 0)
2707 evt = BFA_IOIM_SM_DONE;
2708 else
2709 evt = BFA_IOIM_SM_COMP;
2710 break;
2711
2712 case BFI_IOIM_STS_TIMEDOUT:
2713 bfa_stats(ioim->itnim, iocomp_timedout);
2714 case BFI_IOIM_STS_ABORTED:
2715 rsp->io_status = BFI_IOIM_STS_ABORTED;
2716 bfa_stats(ioim->itnim, iocomp_aborted);
2717 if (rsp->reuse_io_tag == 0)
2718 evt = BFA_IOIM_SM_DONE;
2719 else
2720 evt = BFA_IOIM_SM_COMP;
2721 break;
2722
2723 case BFI_IOIM_STS_PROTO_ERR:
2724 bfa_stats(ioim->itnim, iocom_proto_err);
2725 bfa_assert(rsp->reuse_io_tag);
2726 evt = BFA_IOIM_SM_COMP;
2727 break;
2728
2729 case BFI_IOIM_STS_SQER_NEEDED:
2730 bfa_stats(ioim->itnim, iocom_sqer_needed);
2731 bfa_assert(rsp->reuse_io_tag == 0);
2732 evt = BFA_IOIM_SM_SQRETRY;
2733 break;
2734
2735 case BFI_IOIM_STS_RES_FREE:
2736 bfa_stats(ioim->itnim, iocom_res_free);
2737 evt = BFA_IOIM_SM_FREE;
2738 break;
2739
2740 case BFI_IOIM_STS_HOST_ABORTED:
2741 bfa_stats(ioim->itnim, iocom_hostabrts);
2742 if (rsp->abort_tag != ioim->abort_tag) {
2743 bfa_trc(ioim->bfa, rsp->abort_tag);
2744 bfa_trc(ioim->bfa, ioim->abort_tag);
2745 return;
2746 }
2747
2748 if (rsp->reuse_io_tag)
2749 evt = BFA_IOIM_SM_ABORT_COMP;
2750 else
2751 evt = BFA_IOIM_SM_ABORT_DONE;
2752 break;
2753
2754 case BFI_IOIM_STS_UTAG:
2755 bfa_stats(ioim->itnim, iocom_utags);
2756 evt = BFA_IOIM_SM_COMP_UTAG;
2757 break;
2758
2759 default:
2760 bfa_assert(0);
2761 }
2762
2763 bfa_sm_send_event(ioim, evt);
2764}
2765
2766void
2767bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2768{
2769 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2770 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2771 struct bfa_ioim_s *ioim;
2772 u16 iotag;
2773
ba816ea8 2774 iotag = be16_to_cpu(rsp->io_tag);
a36c61f9
KG
2775
2776 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2777 bfa_assert(ioim->iotag == iotag);
2778
2779 bfa_trc_fp(ioim->bfa, ioim->iotag);
2780 bfa_ioim_cb_profile_comp(fcpim, ioim);
2781
2782 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2783}
2784
2785void
2786bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
2787{
6a18b167 2788 ioim->start_time = jiffies;
a36c61f9
KG
2789}
2790
2791void
2792bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2793{
2794 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2795 u32 index = bfa_ioim_get_index(fcp_dl);
6a18b167 2796 u64 end_time = jiffies;
a36c61f9
KG
2797 struct bfa_itnim_latency_s *io_lat =
2798 &(ioim->itnim->ioprofile.io_latency);
2799 u32 val = (u32)(end_time - ioim->start_time);
2800
2801 bfa_itnim_ioprofile_update(ioim->itnim, index);
2802
2803 io_lat->count[index]++;
2804 io_lat->min[index] = (io_lat->min[index] < val) ?
2805 io_lat->min[index] : val;
2806 io_lat->max[index] = (io_lat->max[index] > val) ?
2807 io_lat->max[index] : val;
2808 io_lat->avg[index] += val;
2809}
5fbe25c7 2810/*
a36c61f9
KG
2811 * Called by itnim to clean up IO while going offline.
2812 */
2813void
2814bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2815{
2816 bfa_trc(ioim->bfa, ioim->iotag);
2817 bfa_stats(ioim->itnim, io_cleanups);
2818
2819 ioim->iosp->tskim = NULL;
2820 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2821}
2822
2823void
2824bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2825{
2826 bfa_trc(ioim->bfa, ioim->iotag);
2827 bfa_stats(ioim->itnim, io_tmaborts);
2828
2829 ioim->iosp->tskim = tskim;
2830 bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2831}
2832
5fbe25c7 2833/*
a36c61f9
KG
2834 * IOC failure handling.
2835 */
2836void
2837bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2838{
2839 bfa_trc(ioim->bfa, ioim->iotag);
2840 bfa_stats(ioim->itnim, io_iocdowns);
2841 bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2842}
2843
5fbe25c7 2844/*
a36c61f9
KG
2845 * IO offline TOV popped. Fail the pending IO.
2846 */
2847void
2848bfa_ioim_tov(struct bfa_ioim_s *ioim)
2849{
2850 bfa_trc(ioim->bfa, ioim->iotag);
2851 bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2852}
2853
2854
5fbe25c7 2855/*
a36c61f9
KG
2856 * Allocate IOIM resource for initiator mode I/O request.
2857 */
2858struct bfa_ioim_s *
2859bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2860 struct bfa_itnim_s *itnim, u16 nsges)
2861{
2862 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2863 struct bfa_ioim_s *ioim;
2864
5fbe25c7 2865 /*
a36c61f9
KG
2866 * alocate IOIM resource
2867 */
2868 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
2869 if (!ioim) {
2870 bfa_stats(itnim, no_iotags);
2871 return NULL;
2872 }
2873
2874 ioim->dio = dio;
2875 ioim->itnim = itnim;
2876 ioim->nsges = nsges;
2877 ioim->nsgpgs = 0;
2878
2879 bfa_stats(itnim, total_ios);
2880 fcpim->ios_active++;
2881
2882 list_add_tail(&ioim->qe, &itnim->io_q);
2883 bfa_trc_fp(ioim->bfa, ioim->iotag);
2884
2885 return ioim;
2886}
2887
2888void
2889bfa_ioim_free(struct bfa_ioim_s *ioim)
2890{
2891 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
2892
2893 bfa_trc_fp(ioim->bfa, ioim->iotag);
2894 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
2895
2896 bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
2897 (ioim->nsges > BFI_SGE_INLINE));
2898
2899 if (ioim->nsgpgs > 0)
2900 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2901
2902 bfa_stats(ioim->itnim, io_comps);
2903 fcpim->ios_active--;
2904
2905 list_del(&ioim->qe);
2906 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2907}
2908
2909void
2910bfa_ioim_start(struct bfa_ioim_s *ioim)
2911{
2912 bfa_trc_fp(ioim->bfa, ioim->iotag);
2913
2914 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2915
5fbe25c7 2916 /*
a36c61f9
KG
2917 * Obtain the queue over which this request has to be issued
2918 */
2919 ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2920 bfa_cb_ioim_get_reqq(ioim->dio) :
2921 bfa_itnim_get_reqq(ioim);
2922
2923 bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2924}
2925
5fbe25c7 2926/*
a36c61f9
KG
2927 * Driver I/O abort request.
2928 */
2929bfa_status_t
2930bfa_ioim_abort(struct bfa_ioim_s *ioim)
2931{
2932
2933 bfa_trc(ioim->bfa, ioim->iotag);
2934
2935 if (!bfa_ioim_is_abortable(ioim))
2936 return BFA_STATUS_FAILED;
2937
2938 bfa_stats(ioim->itnim, io_aborts);
2939 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2940
2941 return BFA_STATUS_OK;
2942}
2943
2944
5fbe25c7 2945/*
a36c61f9
KG
2946 * BFA TSKIM state machine functions
2947 */
2948
5fbe25c7 2949/*
a36c61f9
KG
2950 * Task management command beginning state.
2951 */
2952static void
2953bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2954{
2955 bfa_trc(tskim->bfa, event);
2956
2957 switch (event) {
2958 case BFA_TSKIM_SM_START:
2959 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
2960 bfa_tskim_gather_ios(tskim);
2961
5fbe25c7 2962 /*
a36c61f9
KG
2963 * If device is offline, do not send TM on wire. Just cleanup
2964 * any pending IO requests and complete TM request.
2965 */
2966 if (!bfa_itnim_is_online(tskim->itnim)) {
2967 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2968 tskim->tsk_status = BFI_TSKIM_STS_OK;
2969 bfa_tskim_cleanup_ios(tskim);
2970 return;
2971 }
2972
2973 if (!bfa_tskim_send(tskim)) {
2974 bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
2975 bfa_stats(tskim->itnim, tm_qwait);
2976 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
2977 &tskim->reqq_wait);
2978 }
2979 break;
2980
2981 default:
2982 bfa_sm_fault(tskim->bfa, event);
2983 }
2984}
2985
5fbe25c7 2986/*
a36c61f9
KG
2987 * brief
2988 * TM command is active, awaiting completion from firmware to
2989 * cleanup IO requests in TM scope.
2990 */
2991static void
2992bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
2993{
2994 bfa_trc(tskim->bfa, event);
2995
2996 switch (event) {
2997 case BFA_TSKIM_SM_DONE:
2998 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
2999 bfa_tskim_cleanup_ios(tskim);
3000 break;
3001
3002 case BFA_TSKIM_SM_CLEANUP:
3003 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3004 if (!bfa_tskim_send_abort(tskim)) {
3005 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3006 bfa_stats(tskim->itnim, tm_qwait);
3007 bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3008 &tskim->reqq_wait);
3009 }
3010 break;
3011
3012 case BFA_TSKIM_SM_HWFAIL:
3013 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3014 bfa_tskim_iocdisable_ios(tskim);
3015 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3016 break;
3017
3018 default:
3019 bfa_sm_fault(tskim->bfa, event);
3020 }
3021}
3022
5fbe25c7 3023/*
a36c61f9
KG
3024 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3025 * completion event from firmware.
3026 */
3027static void
3028bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3029{
3030 bfa_trc(tskim->bfa, event);
3031
3032 switch (event) {
3033 case BFA_TSKIM_SM_DONE:
5fbe25c7 3034 /*
a36c61f9
KG
3035 * Ignore and wait for ABORT completion from firmware.
3036 */
3037 break;
3038
3039 case BFA_TSKIM_SM_CLEANUP_DONE:
3040 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3041 bfa_tskim_cleanup_ios(tskim);
3042 break;
3043
3044 case BFA_TSKIM_SM_HWFAIL:
3045 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3046 bfa_tskim_iocdisable_ios(tskim);
3047 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3048 break;
3049
3050 default:
3051 bfa_sm_fault(tskim->bfa, event);
3052 }
3053}
3054
3055static void
3056bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3057{
3058 bfa_trc(tskim->bfa, event);
3059
3060 switch (event) {
3061 case BFA_TSKIM_SM_IOS_DONE:
3062 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3063 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3064 break;
3065
3066 case BFA_TSKIM_SM_CLEANUP:
5fbe25c7 3067 /*
a36c61f9
KG
3068 * Ignore, TM command completed on wire.
3069 * Notify TM conmpletion on IO cleanup completion.
3070 */
3071 break;
3072
3073 case BFA_TSKIM_SM_HWFAIL:
3074 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3075 bfa_tskim_iocdisable_ios(tskim);
3076 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3077 break;
3078
3079 default:
3080 bfa_sm_fault(tskim->bfa, event);
3081 }
3082}
3083
5fbe25c7 3084/*
a36c61f9
KG
3085 * Task management command is waiting for room in request CQ
3086 */
3087static void
3088bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3089{
3090 bfa_trc(tskim->bfa, event);
3091
3092 switch (event) {
3093 case BFA_TSKIM_SM_QRESUME:
3094 bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3095 bfa_tskim_send(tskim);
3096 break;
3097
3098 case BFA_TSKIM_SM_CLEANUP:
5fbe25c7 3099 /*
a36c61f9
KG
3100 * No need to send TM on wire since ITN is offline.
3101 */
3102 bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3103 bfa_reqq_wcancel(&tskim->reqq_wait);
3104 bfa_tskim_cleanup_ios(tskim);
3105 break;
3106
3107 case BFA_TSKIM_SM_HWFAIL:
3108 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3109 bfa_reqq_wcancel(&tskim->reqq_wait);
3110 bfa_tskim_iocdisable_ios(tskim);
3111 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3112 break;
3113
3114 default:
3115 bfa_sm_fault(tskim->bfa, event);
3116 }
3117}
3118
5fbe25c7 3119/*
a36c61f9
KG
3120 * Task management command is active, awaiting for room in request CQ
3121 * to send clean up request.
3122 */
3123static void
3124bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3125 enum bfa_tskim_event event)
3126{
3127 bfa_trc(tskim->bfa, event);
3128
3129 switch (event) {
3130 case BFA_TSKIM_SM_DONE:
3131 bfa_reqq_wcancel(&tskim->reqq_wait);
5fbe25c7 3132 /*
a36c61f9
KG
3133 *
3134 * Fall through !!!
3135 */
3136
3137 case BFA_TSKIM_SM_QRESUME:
3138 bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3139 bfa_tskim_send_abort(tskim);
3140 break;
3141
3142 case BFA_TSKIM_SM_HWFAIL:
3143 bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3144 bfa_reqq_wcancel(&tskim->reqq_wait);
3145 bfa_tskim_iocdisable_ios(tskim);
3146 bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3147 break;
3148
3149 default:
3150 bfa_sm_fault(tskim->bfa, event);
3151 }
3152}
3153
5fbe25c7 3154/*
a36c61f9
KG
3155 * BFA callback is pending
3156 */
3157static void
3158bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3159{
3160 bfa_trc(tskim->bfa, event);
3161
3162 switch (event) {
3163 case BFA_TSKIM_SM_HCB:
3164 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3165 bfa_tskim_free(tskim);
3166 break;
3167
3168 case BFA_TSKIM_SM_CLEANUP:
3169 bfa_tskim_notify_comp(tskim);
3170 break;
3171
3172 case BFA_TSKIM_SM_HWFAIL:
3173 break;
3174
3175 default:
3176 bfa_sm_fault(tskim->bfa, event);
3177 }
3178}
3179
3180
a36c61f9
KG
3181static void
3182__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3183{
3184 struct bfa_tskim_s *tskim = cbarg;
3185
3186 if (!complete) {
3187 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3188 return;
3189 }
3190
3191 bfa_stats(tskim->itnim, tm_success);
3192 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3193}
3194
3195static void
3196__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3197{
3198 struct bfa_tskim_s *tskim = cbarg;
3199
3200 if (!complete) {
3201 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3202 return;
3203 }
3204
3205 bfa_stats(tskim->itnim, tm_failures);
3206 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3207 BFI_TSKIM_STS_FAILED);
3208}
3209
3210static bfa_boolean_t
3211bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3212{
3213 switch (tskim->tm_cmnd) {
3214 case FCP_TM_TARGET_RESET:
3215 return BFA_TRUE;
3216
3217 case FCP_TM_ABORT_TASK_SET:
3218 case FCP_TM_CLEAR_TASK_SET:
3219 case FCP_TM_LUN_RESET:
3220 case FCP_TM_CLEAR_ACA:
3221 return (tskim->lun == lun);
3222
3223 default:
3224 bfa_assert(0);
3225 }
3226
3227 return BFA_FALSE;
3228}
3229
5fbe25c7 3230/*
a36c61f9
KG
3231 * Gather affected IO requests and task management commands.
3232 */
3233static void
3234bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3235{
3236 struct bfa_itnim_s *itnim = tskim->itnim;
3237 struct bfa_ioim_s *ioim;
3238 struct list_head *qe, *qen;
3239
3240 INIT_LIST_HEAD(&tskim->io_q);
3241
5fbe25c7 3242 /*
a36c61f9
KG
3243 * Gather any active IO requests first.
3244 */
3245 list_for_each_safe(qe, qen, &itnim->io_q) {
3246 ioim = (struct bfa_ioim_s *) qe;
3247 if (bfa_tskim_match_scope
3248 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
3249 list_del(&ioim->qe);
3250 list_add_tail(&ioim->qe, &tskim->io_q);
3251 }
3252 }
3253
5fbe25c7 3254 /*
a36c61f9
KG
3255 * Failback any pending IO requests immediately.
3256 */
3257 list_for_each_safe(qe, qen, &itnim->pending_q) {
3258 ioim = (struct bfa_ioim_s *) qe;
3259 if (bfa_tskim_match_scope
3260 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
3261 list_del(&ioim->qe);
3262 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3263 bfa_ioim_tov(ioim);
3264 }
3265 }
3266}
3267
5fbe25c7 3268/*
a36c61f9
KG
3269 * IO cleanup completion
3270 */
3271static void
3272bfa_tskim_cleanp_comp(void *tskim_cbarg)
3273{
3274 struct bfa_tskim_s *tskim = tskim_cbarg;
3275
3276 bfa_stats(tskim->itnim, tm_io_comps);
3277 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3278}
3279
5fbe25c7 3280/*
a36c61f9
KG
3281 * Gather affected IO requests and task management commands.
3282 */
3283static void
3284bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3285{
3286 struct bfa_ioim_s *ioim;
3287 struct list_head *qe, *qen;
3288
3289 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3290
3291 list_for_each_safe(qe, qen, &tskim->io_q) {
3292 ioim = (struct bfa_ioim_s *) qe;
3293 bfa_wc_up(&tskim->wc);
3294 bfa_ioim_cleanup_tm(ioim, tskim);
3295 }
3296
3297 bfa_wc_wait(&tskim->wc);
3298}
3299
5fbe25c7 3300/*
a36c61f9
KG
3301 * Send task management request to firmware.
3302 */
3303static bfa_boolean_t
3304bfa_tskim_send(struct bfa_tskim_s *tskim)
3305{
3306 struct bfa_itnim_s *itnim = tskim->itnim;
3307 struct bfi_tskim_req_s *m;
3308
5fbe25c7 3309 /*
a36c61f9
KG
3310 * check for room in queue to send request now
3311 */
3312 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3313 if (!m)
3314 return BFA_FALSE;
3315
5fbe25c7 3316 /*
a36c61f9
KG
3317 * build i/o request message next
3318 */
3319 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3320 bfa_lpuid(tskim->bfa));
3321
ba816ea8 3322 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
a36c61f9
KG
3323 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3324 m->t_secs = tskim->tsecs;
3325 m->lun = tskim->lun;
3326 m->tm_flags = tskim->tm_cmnd;
3327
5fbe25c7 3328 /*
a36c61f9
KG
3329 * queue I/O message to firmware
3330 */
3331 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3332 return BFA_TRUE;
3333}
3334
5fbe25c7 3335/*
a36c61f9
KG
3336 * Send abort request to cleanup an active TM to firmware.
3337 */
3338static bfa_boolean_t
3339bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3340{
3341 struct bfa_itnim_s *itnim = tskim->itnim;
3342 struct bfi_tskim_abortreq_s *m;
3343
5fbe25c7 3344 /*
a36c61f9
KG
3345 * check for room in queue to send request now
3346 */
3347 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3348 if (!m)
3349 return BFA_FALSE;
3350
5fbe25c7 3351 /*
a36c61f9
KG
3352 * build i/o request message next
3353 */
3354 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3355 bfa_lpuid(tskim->bfa));
3356
ba816ea8 3357 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
a36c61f9 3358
5fbe25c7 3359 /*
a36c61f9
KG
3360 * queue I/O message to firmware
3361 */
3362 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3363 return BFA_TRUE;
3364}
3365
5fbe25c7 3366/*
a36c61f9
KG
3367 * Call to resume task management cmnd waiting for room in request queue.
3368 */
3369static void
3370bfa_tskim_qresume(void *cbarg)
3371{
3372 struct bfa_tskim_s *tskim = cbarg;
3373
3374 bfa_stats(tskim->itnim, tm_qresumes);
3375 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3376}
3377
5fbe25c7 3378/*
a36c61f9
KG
3379 * Cleanup IOs associated with a task mangement command on IOC failures.
3380 */
3381static void
3382bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3383{
3384 struct bfa_ioim_s *ioim;
3385 struct list_head *qe, *qen;
3386
3387 list_for_each_safe(qe, qen, &tskim->io_q) {
3388 ioim = (struct bfa_ioim_s *) qe;
3389 bfa_ioim_iocdisable(ioim);
3390 }
3391}
3392
3393
5fbe25c7 3394/*
a36c61f9
KG
3395 * Notification on completions from related ioim.
3396 */
3397void
3398bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3399{
3400 bfa_wc_down(&tskim->wc);
3401}
3402
5fbe25c7 3403/*
a36c61f9
KG
3404 * Handle IOC h/w failure notification from itnim.
3405 */
3406void
3407bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3408{
3409 tskim->notify = BFA_FALSE;
3410 bfa_stats(tskim->itnim, tm_iocdowns);
3411 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3412}
3413
5fbe25c7 3414/*
a36c61f9
KG
3415 * Cleanup TM command and associated IOs as part of ITNIM offline.
3416 */
3417void
3418bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3419{
3420 tskim->notify = BFA_TRUE;
3421 bfa_stats(tskim->itnim, tm_cleanups);
3422 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3423}
3424
5fbe25c7 3425/*
a36c61f9
KG
3426 * Memory allocation and initialization.
3427 */
3428void
3429bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3430{
3431 struct bfa_tskim_s *tskim;
3432 u16 i;
3433
3434 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3435
3436 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3437 fcpim->tskim_arr = tskim;
3438
3439 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3440 /*
3441 * initialize TSKIM
3442 */
6a18b167 3443 memset(tskim, 0, sizeof(struct bfa_tskim_s));
a36c61f9
KG
3444 tskim->tsk_tag = i;
3445 tskim->bfa = fcpim->bfa;
3446 tskim->fcpim = fcpim;
3447 tskim->notify = BFA_FALSE;
3448 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3449 tskim);
3450 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3451
3452 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3453 }
3454
3455 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3456}
3457
a36c61f9
KG
3458void
3459bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3460{
3461 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3462 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3463 struct bfa_tskim_s *tskim;
ba816ea8 3464 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
a36c61f9
KG
3465
3466 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3467 bfa_assert(tskim->tsk_tag == tsk_tag);
3468
3469 tskim->tsk_status = rsp->tsk_status;
3470
5fbe25c7 3471 /*
a36c61f9
KG
3472 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3473 * requests. All other statuses are for normal completions.
3474 */
3475 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3476 bfa_stats(tskim->itnim, tm_cleanup_comps);
3477 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3478 } else {
3479 bfa_stats(tskim->itnim, tm_fw_rsps);
3480 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3481 }
3482}
3483
3484
a36c61f9
KG
3485struct bfa_tskim_s *
3486bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3487{
3488 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3489 struct bfa_tskim_s *tskim;
3490
3491 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3492
3493 if (tskim)
3494 tskim->dtsk = dtsk;
3495
3496 return tskim;
3497}
3498
3499void
3500bfa_tskim_free(struct bfa_tskim_s *tskim)
3501{
3502 bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3503 list_del(&tskim->qe);
3504 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3505}
3506
5fbe25c7 3507/*
a36c61f9
KG
3508 * Start a task management command.
3509 *
3510 * @param[in] tskim BFA task management command instance
3511 * @param[in] itnim i-t nexus for the task management command
3512 * @param[in] lun lun, if applicable
3513 * @param[in] tm_cmnd Task management command code.
3514 * @param[in] t_secs Timeout in seconds
3515 *
3516 * @return None.
3517 */
3518void
3519bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
3520 enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3521{
3522 tskim->itnim = itnim;
3523 tskim->lun = lun;
3524 tskim->tm_cmnd = tm_cmnd;
3525 tskim->tsecs = tsecs;
3526 tskim->notify = BFA_FALSE;
3527 bfa_stats(itnim, tm_cmnds);
3528
3529 list_add_tail(&tskim->qe, &itnim->tsk_q);
3530 bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3531}