blob: 314c931229806f821a1487712ec135b212f2c6ee [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070020
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
Jing Huang5fbe25c2010-10-18 17:17:23 -070024/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070025 * BFA ITNIM Related definitions
26 */
27static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
28
29#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
30 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
31
32#define bfa_fcpim_additn(__itnim) \
33 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
34#define bfa_fcpim_delitn(__itnim) do { \
35 bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
36 bfa_itnim_update_del_itn_stats(__itnim); \
37 list_del(&(__itnim)->qe); \
38 bfa_assert(list_empty(&(__itnim)->io_q)); \
39 bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
40 bfa_assert(list_empty(&(__itnim)->pending_q)); \
41} while (0)
42
43#define bfa_itnim_online_cb(__itnim) do { \
44 if ((__itnim)->bfa->fcs) \
45 bfa_cb_itnim_online((__itnim)->ditn); \
46 else { \
47 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
48 __bfa_cb_itnim_online, (__itnim)); \
49 } \
50} while (0)
51
52#define bfa_itnim_offline_cb(__itnim) do { \
53 if ((__itnim)->bfa->fcs) \
54 bfa_cb_itnim_offline((__itnim)->ditn); \
55 else { \
56 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
57 __bfa_cb_itnim_offline, (__itnim)); \
58 } \
59} while (0)
60
61#define bfa_itnim_sler_cb(__itnim) do { \
62 if ((__itnim)->bfa->fcs) \
63 bfa_cb_itnim_sler((__itnim)->ditn); \
64 else { \
65 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
66 __bfa_cb_itnim_sler, (__itnim)); \
67 } \
68} while (0)
69
Jing Huang5fbe25c2010-10-18 17:17:23 -070070/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -080071 * itnim state machine event
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070072 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070073enum bfa_itnim_event {
74 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
75 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
76 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
77 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
78 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
79 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
80 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
81 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
82 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
83};
84
Jing Huang5fbe25c2010-10-18 17:17:23 -070085/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070086 * BFA IOIM related definitions
87 */
88#define bfa_ioim_move_to_comp_q(__ioim) do { \
89 list_del(&(__ioim)->qe); \
90 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
91} while (0)
92
93
94#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
95 if ((__fcpim)->profile_comp) \
96 (__fcpim)->profile_comp(__ioim); \
97} while (0)
98
99#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
100 if ((__fcpim)->profile_start) \
101 (__fcpim)->profile_start(__ioim); \
102} while (0)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700103
Jing Huang5fbe25c2010-10-18 17:17:23 -0700104/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700105 * IO state machine events
106 */
107enum bfa_ioim_event {
108 BFA_IOIM_SM_START = 1, /* io start request from host */
109 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
110 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
111 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
112 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
113 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
114 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
115 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
116 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
117 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
118 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
119 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
120 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
121 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
122 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
123 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
124 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
125 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
126};
127
128
Jing Huang5fbe25c2010-10-18 17:17:23 -0700129/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700130 * BFA TSKIM related definitions
131 */
132
Jing Huang5fbe25c2010-10-18 17:17:23 -0700133/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700134 * task management completion handling
135 */
136#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
137 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
138 bfa_tskim_notify_comp(__tskim); \
139} while (0)
140
141#define bfa_tskim_notify_comp(__tskim) do { \
142 if ((__tskim)->notify) \
143 bfa_itnim_tskdone((__tskim)->itnim); \
144} while (0)
145
146
147enum bfa_tskim_event {
148 BFA_TSKIM_SM_START = 1, /* TM command start */
149 BFA_TSKIM_SM_DONE = 2, /* TM completion */
150 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
151 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
152 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
153 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
154 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
155 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
156};
157
Jing Huang5fbe25c2010-10-18 17:17:23 -0700158/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700159 * forward declaration for BFA ITNIM functions
160 */
161static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
162static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
163static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
164static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
165static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
166static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
167static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
168static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
169static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
170static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
171static void bfa_itnim_iotov(void *itnim_arg);
172static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
173static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
174static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
175
Jing Huang5fbe25c2010-10-18 17:17:23 -0700176/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700177 * forward declaration of ITNIM state machine
178 */
179static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
180 enum bfa_itnim_event event);
181static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
182 enum bfa_itnim_event event);
183static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
184 enum bfa_itnim_event event);
185static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
186 enum bfa_itnim_event event);
187static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
188 enum bfa_itnim_event event);
189static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
190 enum bfa_itnim_event event);
191static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
192 enum bfa_itnim_event event);
193static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
194 enum bfa_itnim_event event);
195static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
196 enum bfa_itnim_event event);
197static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
198 enum bfa_itnim_event event);
199static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
200 enum bfa_itnim_event event);
201static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
202 enum bfa_itnim_event event);
203static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
204 enum bfa_itnim_event event);
205static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
206 enum bfa_itnim_event event);
207static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
208 enum bfa_itnim_event event);
209
Jing Huang5fbe25c2010-10-18 17:17:23 -0700210/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700211 * forward declaration for BFA IOIM functions
212 */
213static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -0800214static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700215static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
216static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
217static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
218static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
219static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
220static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
221static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
222static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
223
Jing Huang5fbe25c2010-10-18 17:17:23 -0700224/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700225 * forward declaration of BFA IO state machine
226 */
227static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
228 enum bfa_ioim_event event);
229static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
230 enum bfa_ioim_event event);
231static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
232 enum bfa_ioim_event event);
233static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
234 enum bfa_ioim_event event);
235static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
236 enum bfa_ioim_event event);
237static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
238 enum bfa_ioim_event event);
239static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
Jing Huang5fbe25c2010-10-18 17:17:23 -0700251/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700252 * forward declaration for BFA TSKIM functions
253 */
254static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
255static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
256static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
Maggie Zhangf3148782010-12-09 19:11:39 -0800257 struct scsi_lun lun);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700258static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
259static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
260static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
261static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
262static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
263static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
264
Jing Huang5fbe25c2010-10-18 17:17:23 -0700265/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700266 * forward declaration of BFA TSKIM state machine
267 */
268static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
269 enum bfa_tskim_event event);
270static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
271 enum bfa_tskim_event event);
272static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
273 enum bfa_tskim_event event);
274static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
275 enum bfa_tskim_event event);
276static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
277 enum bfa_tskim_event event);
278static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
279 enum bfa_tskim_event event);
280static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
281 enum bfa_tskim_event event);
Jing Huang5fbe25c2010-10-18 17:17:23 -0700282/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800283 * BFA FCP Initiator Mode module
Jing Huang7725ccf2009-09-23 17:46:15 -0700284 */
285
Jing Huang5fbe25c2010-10-18 17:17:23 -0700286/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800287 * Compute and return memory needed by FCP(im) module.
Jing Huang7725ccf2009-09-23 17:46:15 -0700288 */
289static void
290bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
291 u32 *dm_len)
292{
293 bfa_itnim_meminfo(cfg, km_len, dm_len);
294
Jing Huang5fbe25c2010-10-18 17:17:23 -0700295 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700296 * IO memory
297 */
298 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
299 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
300 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
301 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
302
303 *km_len += cfg->fwcfg.num_ioim_reqs *
304 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
305
306 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
307
Jing Huang5fbe25c2010-10-18 17:17:23 -0700308 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700309 * task management command memory
310 */
311 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
312 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
313 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
314}
315
316
/*
 * Attach-time initialization of the FCP(im) module: cache the relevant
 * driver/firmware configuration in the module state and attach the
 * itnim, tskim and ioim sub-modules (which carve their memory out of
 * @meminfo, as sized by bfa_fcpim_meminfo()).
 */
static void
bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	/* One itnim per configured remote port. */
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	/* IO profiling is off until explicitly enabled. */
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim, meminfo);
	bfa_tskim_attach(fcpim, meminfo);
	bfa_ioim_attach(fcpim, meminfo);
}
341
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
	/* Intentionally empty - module hook required by BFA_MODULE(fcpim). */
}
346
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
	/* Intentionally empty - module hook required by BFA_MODULE(fcpim). */
}
351
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
	/* Intentionally empty - module hook required by BFA_MODULE(fcpim). */
}
356
357static void
358bfa_fcpim_iocdisable(struct bfa_s *bfa)
359{
360 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
361 struct bfa_itnim_s *itnim;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700362 struct list_head *qe, *qen;
Jing Huang7725ccf2009-09-23 17:46:15 -0700363
364 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
365 itnim = (struct bfa_itnim_s *) qe;
366 bfa_itnim_iocdisable(itnim);
367 }
368}
369
370void
371bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
372{
373 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
374
375 fcpim->path_tov = path_tov * 1000;
376 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
377 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
378}
379
u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	/* path_tov is stored in milliseconds; report it in seconds. */
	return fcpim->path_tov / 1000;
}
387
/*
 * Return the currently configured FCP(im) queue depth.
 */
u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->q_depth;
}
395
Jing Huang5fbe25c2010-10-18 17:17:23 -0700396/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700397 * BFA ITNIM module state machine functions
398 */
399
Jing Huang5fbe25c2010-10-18 17:17:23 -0700400/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800401 * Beginning/unallocated state - no events expected.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700402 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		/* Newly created itnim starts offline and joins the fcpim
		 * module's itnim queue. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		/* No other event is valid before creation. */
		bfa_sm_fault(itnim->bfa, event);
	}
}
420
Jing Huang5fbe25c2010-10-18 17:17:23 -0700421/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800422 * Beginning state, only online event expected.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700423 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/* Ask firmware to create the itnim; wait in the qfull state
		 * if there is no request-queue space right now. */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Never went online - free immediately. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
451
Jing Huang5fbe25c2010-10-18 17:17:23 -0700452/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700453 * Waiting for itnim create response from firmware.
454 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Firmware accepted the create - itnim is now online. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Delete must wait for the outstanding create response. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* Went offline before create completed - issue a firmware
		 * delete (qfull variant if no request-queue space). */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
488
/*
 * Waiting for request-queue space to send the firmware itnim create.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available - retry the firmware create. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Nothing was sent to firmware yet - cancel the queue wait
		 * and free the itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
523
Jing Huang5fbe25c2010-10-18 17:17:23 -0700524/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800525 * Waiting for itnim create response from firmware, a delete is pending.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700526 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Create response arrived; now issue the pending delete. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC failed - no firmware interaction possible; free now. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
551
Jing Huang5fbe25c2010-10-18 17:17:23 -0700552/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800553 * Online state - normal parking state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700554 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		/* Start the IO timeout window and clean up active IOs/TMs
		 * before completing the offline. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* No iotov on delete - just clean up outstanding IOs/TMs. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Second-level error recovery requested by firmware. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
593
Jing Huang5fbe25c2010-10-18 17:17:23 -0700594/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800595 * Second level error recovery need.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700596 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		/* Delete cancels the IO timeout machinery. */
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
624
Jing Huang5fbe25c2010-10-18 17:17:23 -0700625/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800626 * Going offline. Waiting for active IO cleanup.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700627 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* All IOs cleaned up - now delete the firmware itnim. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* SLER is a no-op while cleanup is already in progress. */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
661
Jing Huang5fbe25c2010-10-18 17:17:23 -0700662/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800663 * Deleting itnim. Waiting for active IO cleanup.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700664 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup finished - delete the firmware itnim next. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
689
Jing Huang5fbe25c2010-10-18 17:17:23 -0700690/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700691 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
692 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Firmware delete completed - itnim is fully offline. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Same firmware delete also serves the host delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
718
/*
 * Waiting for request-queue space to send the firmware itnim delete.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available - retry the firmware delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC failed - abandon the queue wait. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
746
Jing Huang5fbe25c2010-10-18 17:17:23 -0700747/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800748 * Offline state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700749 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* Coming back online - recreate the firmware itnim. */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
778
/*
 * IOC h/w failure state: no firmware interaction is possible here.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* No firmware delete needed - report offline directly. */
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Already in the IOC-disabled state - ignore. */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
811
Jing Huang5fbe25c2010-10-18 17:17:23 -0700812/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800813 * Itnim is deleted, waiting for firmware response to delete.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700814 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Either the firmware delete completed or the IOC failed;
		 * in both cases the itnim can be freed now. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
832
/*
 * Itnim is being deleted, waiting for request-queue space for the
 * firmware delete.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available - send the firmware delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC failed - cancel the queue wait and free the itnim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
856
Jing Huang5fbe25c2010-10-18 17:17:23 -0700857/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800858 * Initiate cleanup of all IOs on an IOC failure.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700859 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	/* Abort all active task management commands. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	/* Abort all active IO requests. */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* IOs already parked on the cleanup queue are disabled too. */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
890
Jing Huang5fbe25c2010-10-18 17:17:23 -0700891/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800892 * IO cleanup completion
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700893 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	/* Wait-counter callback: all IO/TM cleanups have completed, so
	 * advance the itnim state machine. */
	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
902
Jing Huang5fbe25c2010-10-18 17:17:23 -0700903/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800904 * Initiate cleanup of all IOs.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700905 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	/* The wait counter fires bfa_itnim_cleanp_comp() (which sends
	 * BFA_ITNIM_SM_CLEANUP) only after every started cleanup below
	 * has completed. */
	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	/* Clean up active task management commands as well. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
937
938static void
939__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
940{
941 struct bfa_itnim_s *itnim = cbarg;
942
943 if (complete)
944 bfa_cb_itnim_online(itnim->ditn);
945}
946
947static void
948__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
949{
950 struct bfa_itnim_s *itnim = cbarg;
951
952 if (complete)
953 bfa_cb_itnim_offline(itnim->ditn);
954}
955
956static void
957__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
958{
959 struct bfa_itnim_s *itnim = cbarg;
960
961 if (complete)
962 bfa_cb_itnim_sler(itnim->ditn);
963}
964
Jing Huang5fbe25c2010-10-18 17:17:23 -0700965/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700966 * Call to resume any I/O requests waiting for room in request queue.
967 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	/* Request queue has space again; let the SM retry the fw message. */
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
975
Jing Huang5fbe25c2010-10-18 17:17:23 -0700976/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700977 * bfa_itnim_public
978 */
979
/* An IO being cleaned up has finished; drop one cleanup wait-count
 * reference (last one triggers bfa_itnim_cleanp_comp()). */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
985
/* A TM command being cleaned up has finished; drop one cleanup
 * wait-count reference. */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
991
/* Account for kernel-virtual memory needed by the itnim pool: one
 * bfa_itnim_s per configured rport. DMA length (dm_len) is unused here. */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
1001
/* Carve the itnim array out of the pre-allocated KVA block and initialize
 * every itnim to its quiescent (uninit) state. */
void
bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		/* itnim i is permanently paired with rport tag i */
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* latency minimums start at max value so first sample wins */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* advance the KVA cursor past the consumed array */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1035
/* IOC hardware failure notification; drive the itnim SM to its
 * iocdisable handling. */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1042
/* Send an ITNIM create request to firmware.
 * Returns BFA_TRUE if queued; BFA_FALSE if the request queue is full,
 * in which case a queue-resume wait element is armed and the SM will be
 * resumed via bfa_itnim_qresume(). */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1073
/* Send an ITNIM delete request to firmware.
 * Returns BFA_TRUE if queued; BFA_FALSE if the request queue is full
 * (a queue-resume wait element is armed in that case). */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itnim_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
			bfa_lpuid(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq);
	return BFA_TRUE;
}
1099
Jing Huang5fbe25c2010-10-18 17:17:23 -07001100/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001101 * Cleanup all pending failed inflight requests.
1102 */
1103static void
1104bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1105{
1106 struct bfa_ioim_s *ioim;
1107 struct list_head *qe, *qen;
1108
1109 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1110 ioim = (struct bfa_ioim_s *)qe;
1111 bfa_ioim_delayed_comp(ioim, iotov);
1112 }
1113}
1114
Jing Huang5fbe25c2010-10-18 17:17:23 -07001115/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001116 * Start all pending IO requests.
1117 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/* Target is back before path TOV expired; cancel the timer. */
	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1139
Jing Huang5fbe25c2010-10-18 17:17:23 -07001140/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001141 * Fail all pending IO requests
1142 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		/* move to completion queue, then fail with a timeout */
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1162
Jing Huang5fbe25c2010-10-18 17:17:23 -07001163/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001164 * IO TOV timer callback. Fail any pending IO requests.
1165 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* timer has fired, so it is no longer armed */
	itnim->iotov_active = BFA_FALSE;

	/* bracket the failback with tov_begin/tov notifications */
	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1177
Jing Huang5fbe25c2010-10-18 17:17:23 -07001178/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001179 * Start IO TOV timer for failing back pending IO requests in offline state.
1180 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	/* path_tov == 0 disables IO hold-off entirely */
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* timer only makes sense while IOs are being held */
		bfa_assert(bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1192
Jing Huang5fbe25c2010-10-18 17:17:23 -07001193/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001194 * Stop IO TOV timer.
1195 */
1196static void
1197bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1198{
1199 if (itnim->iotov_active) {
1200 itnim->iotov_active = BFA_FALSE;
1201 bfa_timer_stop(&itnim->timer);
1202 }
1203}
1204
/*
 * Stop the IO TOV timer (if running) and fail back all held IO requests.
 */
1208static void
1209bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1210{
1211 bfa_boolean_t pathtov_active = BFA_FALSE;
1212
1213 if (itnim->iotov_active)
1214 pathtov_active = BFA_TRUE;
1215
1216 bfa_itnim_iotov_stop(itnim);
1217 if (pathtov_active)
1218 bfa_cb_itnim_tov_begin(itnim->ditn);
1219 bfa_itnim_iotov_cleanup(itnim);
1220 if (pathtov_active)
1221 bfa_cb_itnim_tov(itnim->ditn);
1222}
1223
/* Fold this itnim's per-session counters into the module-wide
 * deleted-itnim aggregate before the itnim is removed. */
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1242
Jing Huang5fbe25c2010-10-18 17:17:23 -07001243/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001244 * bfa_itnim_public
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001245 */
1246
Jing Huang5fbe25c2010-10-18 17:17:23 -07001247/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001248 * Itnim interrupt processing.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001249 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	union bfi_itnim_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	/* Dispatch firmware-to-host itnim messages to the owning itnim's SM */
	switch (m->mhdr.msg_id) {
	case BFI_ITNIM_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		/* firmware create is expected to always succeed */
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITNIM_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		/* unknown message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1290
Jing Huang5fbe25c2010-10-18 17:17:23 -07001291/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001292 * bfa_itnim_api
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001293 */
1294
/* Associate the pre-allocated itnim paired with @rport's tag with the
 * driver-level itnim @ditn and kick off firmware creation.
 * Returns the bfa itnim handle. */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	/* pairing was fixed at attach time; see bfa_itnim_attach() */
	bfa_assert(itnim->rport == rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1311
/* Request deletion of the itnim; actual teardown is driven by the SM. */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1318
/* Bring the itnim online. @seq_rec selects sequence-level error
 * recovery for the firmware session (copied into the create request). */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1326
/* Take the itnim offline; IO cleanup/failback is driven by the SM. */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1333
Jing Huang5fbe25c2010-10-18 17:17:23 -07001334/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001335 * Return true if itnim is considered offline for holding off IO request.
1336 * IO is not held if itnim is being deleted.
1337 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	/* Hold IOs only while path TOV is configured, the TOV timer is
	 * armed, and the itnim is in one of the transitional/offline
	 * states listed below. */
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1349
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001350void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001351bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1352{
1353 int j;
Jing Huang6a18b162010-10-18 17:08:54 -07001354 memset(&itnim->stats, 0, sizeof(itnim->stats));
1355 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001356 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1357 itnim->ioprofile.io_latency.min[j] = ~0;
1358}
1359
Jing Huang5fbe25c2010-10-18 17:17:23 -07001360/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001361 * BFA IO module state machine functions
1362 */
1363
Jing Huang5fbe25c2010-10-18 17:17:23 -07001364/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001365 * IO is not started (unallocated).
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001366 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* target offline and IOs not held: fail the
				 * IO back immediately with path TOV status */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* park the IO until the target returns or
				 * the IO TOV timer fires */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* IOs with more SGEs than fit inline need SG pages first */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			/* no room in request queue; wait for qresume */
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* held IO timed out waiting for the path to recover */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1428
Jing Huang5fbe25c2010-10-18 17:17:23 -07001429/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001430 * IO is waiting for SG pages.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001431 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages available; try to send the IO request */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* cancel the SG page wait and fail the IO back */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1475
Jing Huang5fbe25c2010-10-18 17:17:23 -07001476/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001477 * IO is active.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001478 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		/* fast-path good completion */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* completed, but firmware still holds the IO tag resource */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* host-requested (explicit) abort */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort on itnim cleanup */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1559
Jing Huang5fbe25c2010-10-18 17:17:23 -07001560/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001561 * IO is retried with new tag.
1562 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* itnim cleanup overrides the pending retry */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1613
Jing Huang5fbe25c2010-10-18 17:17:23 -07001614/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001615 * IO is being aborted, waiting for completion from firmware.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001616 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* completion raced with the abort; ignore, wait for
		 * the abort response */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		/* IO tag still held by firmware until FREE arrives */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* explicit abort is downgraded to an implicit cleanup abort */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1675
Jing Huang5fbe25c2010-10-18 17:17:23 -07001676/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001677 * IO is being cleaned up (implicit abort), waiting for completion from
1678 * firmware.
1679 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* completion raced with the cleanup abort; ignore */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1736
Jing Huang5fbe25c2010-10-18 17:17:23 -07001737/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001738 * IO is waiting for room in request CQ
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001739 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* request queue has room again; send the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* IO never reached firmware; cancel queue wait and abort */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1780
Jing Huang5fbe25c2010-10-18 17:17:23 -07001781/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001782 * Active IO is being aborted, waiting for room in request CQ.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001783 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* queue has room; send the pending abort request */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* downgrade explicit abort to implicit cleanup abort;
		 * stay queued for request-queue space */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1831
Jing Huang5fbe25c2010-10-18 17:17:23 -07001832/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001833 * Active IO is being cleaned up, waiting for room in request CQ.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001834 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* queue has room; send the pending cleanup abort */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the cleanup abort could be sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1881
Jing Huang5fbe25c2010-10-18 17:17:23 -07001882/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001883 * IO bfa callback is pending.
1884 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* host callback delivered; IO can be recycled */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* already completing to the host; nothing more to do */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1908
Jing Huang5fbe25c2010-10-18 17:17:23 -07001909/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001910 * IO bfa callback is pending. IO resource cannot be freed.
1911 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* host callback done first; still waiting on fw resource */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* fw resource freed first; still waiting on host callback */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure implies the fw resource is gone */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1941
Jing Huang5fbe25c2010-10-18 17:17:23 -07001942/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001943 * IO is completed, waiting resource free from firmware.
1944 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* firmware released the IO tag; recycle the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* resource will never be reported free; nothing to do here */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1968
1969
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001970static void
1971__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
1972{
1973 struct bfa_ioim_s *ioim = cbarg;
1974
1975 if (!complete) {
1976 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
1977 return;
1978 }
1979
1980 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
1981}
1982
/*
 * Deferred BFA callback for normal IO completion: decode the firmware
 * response (sense data, residue) and hand the result to the driver.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s	*ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	if (!complete) {
		/* Callback canceled: bounce HCB back into the state machine. */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 * (firmware reports it big-endian; overrun is negative)
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
2025
2026static void
2027__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2028{
2029 struct bfa_ioim_s *ioim = cbarg;
2030
2031 if (!complete) {
2032 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2033 return;
2034 }
2035
2036 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2037 0, 0, NULL, 0);
2038}
2039
2040static void
2041__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2042{
2043 struct bfa_ioim_s *ioim = cbarg;
2044
2045 bfa_stats(ioim->itnim, path_tov_expired);
2046 if (!complete) {
2047 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2048 return;
2049 }
2050
2051 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2052 0, 0, NULL, 0);
2053}
2054
2055static void
2056__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2057{
2058 struct bfa_ioim_s *ioim = cbarg;
2059
2060 if (!complete) {
2061 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2062 return;
2063 }
2064
2065 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2066}
2067
/*
 * SG-page wait-queue callback: deferred SG pages are now available.
 * Splice them onto the IO's page list and resume the state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* Take ownership of the pages granted to our wait-queue element. */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2078
/*
 * Send I/O request to firmware.
 *
 * Builds the BFI IO request message in the request queue: inline SG
 * element, chained SG pages for larger scatterlists, FCP command block
 * and message header. Returns BFA_FALSE (and queues a reqq wait) if
 * there is no room in the request queue.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			/* more elements follow iff they spill into SG pages */
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			/* start of a fresh SG page */
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate page with a PGDLEN element */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full: link to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline element: SG-page chain anchor, or zero if inline-only */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(scsi_cdb_t *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through - RW uses the generic IO message class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence-level recovery or unaligned length forces generic class */
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2228
/*
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time.
 *
 * Returns BFA_TRUE when pages are available (or none are needed);
 * BFA_FALSE when the request must wait on the SG-page pool.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		/* pool exhausted: queue for bfa_ioim_sgpg_alloced() callback */
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
2258
/*
 * Send I/O abort request to firmware.
 *
 * Returns BFA_FALSE if there is no room in the request queue (caller
 * retries via queue-resume).
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i	msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next:
	 * explicit abort for driver-initiated aborts, cleanup otherwise
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag    = cpu_to_be16(ioim->iotag);
	/* new tag per attempt lets the ISR discard stale abort responses */
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2293
Jing Huang5fbe25c2010-10-18 17:17:23 -07002294/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002295 * Call to resume any I/O requests waiting for room in request queue.
2296 */
2297static void
2298bfa_ioim_qresume(void *cbarg)
2299{
2300 struct bfa_ioim_s *ioim = cbarg;
2301
2302 bfa_stats(ioim->itnim, qresumes);
2303 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2304}
2305
2306
/*
 * Complete itnim/TM-driven cleanup of this IO: move it off the itnim
 * queue and notify whoever is waiting (itnim offline or a TM command).
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		/* itnim-offline cleanup path */
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			/* defer host completion until IO TOV pops */
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		/* TM-command cleanup path: count this IO done */
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
2327
2328static bfa_boolean_t
2329bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2330{
2331 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2332 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2333 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2334 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2335 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2336 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2337 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2338 return BFA_FALSE;
2339
2340 return BFA_TRUE;
2341}
2342
/*
 * Fail back an IO whose host completion was delayed while the device
 * was offline.
 *
 * @param ioim	IO to complete
 * @param iotov	BFA_TRUE if the path TOV timer expired
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device cameback online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
2368
2369
/*
 * Memory allocation and initialization.
 *
 * Claims KVA for the IOIM and IOIM-sp arrays and DMA memory for the
 * per-IO sense buffers out of @minfo, then initializes every IOIM and
 * places it on the free queue. Claim order must match the sizing pass
 * done elsewhere in the module.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;
	u8		*snsinfo;
	u32		snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell firmware where the sense buffers live */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2433
/*
 * Firmware IO response handler: map the BFI completion status to a
 * state-machine event and deliver it to the owning IOIM.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* stash the full response for __bfa_cb_ioim_comp() to decode */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		/* reuse_io_tag == 0 means firmware still holds the tag */
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fall through - timed-out IOs are completed as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* firmware asks for a sequence-level retry */
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		/* discard responses to superseded abort attempts */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
2517
2518void
2519bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2520{
2521 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2522 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2523 struct bfa_ioim_s *ioim;
2524 u16 iotag;
2525
Jing Huangba816ea2010-10-18 17:10:50 -07002526 iotag = be16_to_cpu(rsp->io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002527
2528 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2529 bfa_assert(ioim->iotag == iotag);
2530
2531 bfa_trc_fp(ioim->bfa, ioim->iotag);
2532 bfa_ioim_cb_profile_comp(fcpim, ioim);
2533
2534 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2535}
2536
/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* no TM command is driving this cleanup */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2549
/*
 * Clean up an IO on behalf of a task management command; completion
 * is accounted back to @tskim via its worker counter.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	/* record the owning TM command for bfa_ioim_notify_cleanup() */
	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2559
/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2570
/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2580
2581
/*
 * Allocate IOIM resource for initiator mode I/O request.
 *
 * @param bfa	BFA instance
 * @param dio	driver-layer IO handle (struct scsi_cmnd in disguise)
 * @param itnim	target the IO is addressed to
 * @param nsges	number of scatter/gather elements
 *
 * @return the allocated IOIM, or NULL if the tag pool is exhausted.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * alocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
2614
/*
 * Return an IOIM (and any chained SG pages) to the free pools.
 * Must only be called once the state machine is back in uninit.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	/* SG pages may only be held by IOs that exceeded inline capacity */
	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
			  (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
2635
/*
 * Kick off a freshly allocated IO: pick its request queue and start
 * the state machine.
 */
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	/*
	 * NOTE(review): with ioredirect enabled the reqq is set to
	 * BFA_FALSE (i.e. queue 0) rather than a redirect-selected
	 * queue - confirm this is the intended behavior.
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2651
/*
 * Driver I/O abort request.
 *
 * @return BFA_STATUS_OK if the abort was initiated,
 *	   BFA_STATUS_FAILED if the IO is in a non-abortable state.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{

	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
2669
Jing Huang5fbe25c2010-10-18 17:17:23 -07002670/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002671 * BFA TSKIM state machine functions
2672 */
2673
/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		/* collect the IOs this TM command covers */
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			/* request queue full: wait for space */
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2710
/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM completed on wire; now clean up covered IOs */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* itnim went offline: abort the in-flight TM command */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2746
/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2778
/*
 * TM command done on wire; waiting for cleanup of all IOs in its scope
 * before completing the TM to the driver.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* all covered IOs cleaned up: complete the TM command */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM conmpletion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2807
/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* queue space available again: send the TM request */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2842
/*
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2875
/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* host callback delivered: recycle the TSKIM */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* callback already queued; nothing more to do */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2901
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002902static void
2903__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
2904{
2905 struct bfa_tskim_s *tskim = cbarg;
2906
2907 if (!complete) {
2908 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2909 return;
2910 }
2911
2912 bfa_stats(tskim->itnim, tm_success);
2913 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
2914}
2915
2916static void
2917__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
2918{
2919 struct bfa_tskim_s *tskim = cbarg;
2920
2921 if (!complete) {
2922 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2923 return;
2924 }
2925
2926 bfa_stats(tskim->itnim, tm_failures);
2927 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
2928 BFI_TSKIM_STS_FAILED);
2929}
2930
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002931static bfa_boolean_t
Maggie Zhangf3148782010-12-09 19:11:39 -08002932bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002933{
2934 switch (tskim->tm_cmnd) {
2935 case FCP_TM_TARGET_RESET:
2936 return BFA_TRUE;
2937
2938 case FCP_TM_ABORT_TASK_SET:
2939 case FCP_TM_CLEAR_TASK_SET:
2940 case FCP_TM_LUN_RESET:
2941 case FCP_TM_CLEAR_ACA:
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002942 return !memcmp(&tskim->lun, &lun, sizeof(lun));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002943
2944 default:
2945 bfa_assert(0);
2946 }
2947
2948 return BFA_FALSE;
2949}
2950
/*
 * Gather affected IO requests and task management commands.
 *
 * Builds tskim->io_q from the ITNIM's IOs that fall within this TM
 * command's scope (see bfa_tskim_match_scope()). Active IOs are moved
 * onto tskim->io_q for cleanup; IOs still pending (never sent to
 * firmware) are completed back immediately with a timeout status.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			/* move from the ITNIM's active queue to the TM scope */
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			/* never reached firmware: complete on comp queue */
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
2992
Jing Huang5fbe25c2010-10-18 17:17:23 -07002993/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002994 * IO cleanup completion
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002995 */
2996static void
2997bfa_tskim_cleanp_comp(void *tskim_cbarg)
2998{
2999 struct bfa_tskim_s *tskim = tskim_cbarg;
3000
3001 bfa_stats(tskim->itnim, tm_io_comps);
3002 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3003}
3004
/*
 * Clean up all IO requests gathered under this task management command.
 *
 * Arms a wait counter whose drain callback is bfa_tskim_cleanp_comp(),
 * takes one reference per IO before handing it to cleanup, then drops
 * the initial reference; the callback fires once every IO completes.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		/* reference per IO; matched by bfa_tskim_iodone() */
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}
3024
/*
 * Send task management request to firmware.
 *
 * @return BFA_TRUE if the request was queued to firmware, BFA_FALSE if
 *	   no request queue element was available (caller retries later;
 *	   presumably via the reqq_wait/bfa_tskim_qresume path — see
 *	   bfa_tskim_attach()).
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	/* tag travels big-endian on the wire; echoed back in the response */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3059
/*
 * Send abort request to cleanup an active TM to firmware.
 *
 * @return BFA_TRUE if the abort was queued, BFA_FALSE if no request
 *	   queue element was available (caller retries later).
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	/* identifies the outstanding TM request being aborted */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3090
Jing Huang5fbe25c2010-10-18 17:17:23 -07003091/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08003092 * Call to resume task management cmnd waiting for room in request queue.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003093 */
3094static void
3095bfa_tskim_qresume(void *cbarg)
3096{
3097 struct bfa_tskim_s *tskim = cbarg;
3098
3099 bfa_stats(tskim->itnim, tm_qresumes);
3100 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3101}
3102
Jing Huang5fbe25c2010-10-18 17:17:23 -07003103/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003104 * Cleanup IOs associated with a task mangement command on IOC failures.
3105 */
3106static void
3107bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3108{
3109 struct bfa_ioim_s *ioim;
3110 struct list_head *qe, *qen;
3111
3112 list_for_each_safe(qe, qen, &tskim->io_q) {
3113 ioim = (struct bfa_ioim_s *) qe;
3114 bfa_ioim_iocdisable(ioim);
3115 }
3116}
3117
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/*
	 * One affected IO finished cleanup; when the wait counter armed
	 * in bfa_tskim_cleanup_ios() drains, bfa_tskim_cleanp_comp()
	 * fires.
	 */
	bfa_wc_down(&tskim->wc);
}
3126
Jing Huang5fbe25c2010-10-18 17:17:23 -07003127/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003128 * Handle IOC h/w failure notification from itnim.
3129 */
3130void
3131bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3132{
3133 tskim->notify = BFA_FALSE;
3134 bfa_stats(tskim->itnim, tm_iocdowns);
3135 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3136}
3137
Jing Huang5fbe25c2010-10-18 17:17:23 -07003138/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003139 * Cleanup TM command and associated IOs as part of ITNIM offline.
3140 */
3141void
3142bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3143{
3144 tskim->notify = BFA_TRUE;
3145 bfa_stats(tskim->itnim, tm_cleanups);
3146 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3147}
3148
/*
 * Memory allocation and initialization.
 *
 * Carves num_tskim_reqs TSKIM structures out of the KVA region in
 * *minfo, initializes each one into the uninit state, places them on
 * the module free list, and advances the KVA cursor past the array.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;	/* tag == array index; see BFA_TSKIM_FROM_TAG */
		tskim->bfa	= fcpim->bfa;
		tskim->fcpim	= fcpim;
		tskim->notify  = BFA_FALSE;
		/* resume callback for when the request queue has room again */
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
					tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	/* advance the shared KVA cursor past the TSKIM array */
	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}
3181
/*
 * Handle a TM response message from firmware: locate the TSKIM by its
 * tag and feed the appropriate completion event into its state machine.
 */
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);	/* wire format is big-endian */

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
3207
3208
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003209struct bfa_tskim_s *
3210bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3211{
3212 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3213 struct bfa_tskim_s *tskim;
3214
3215 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3216
3217 if (tskim)
3218 tskim->dtsk = dtsk;
3219
3220 return tskim;
3221}
3222
3223void
3224bfa_tskim_free(struct bfa_tskim_s *tskim)
3225{
3226 bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3227 list_del(&tskim->qe);
3228 list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3229}
3230
/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	t_secs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim	= itnim;
	tskim->lun	= lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs	= tsecs;
	tskim->notify  = BFA_FALSE;	/* no ITNIM cleanup notification yet */
	bfa_stats(itnim, tm_cmnds);

	/* track the active TM command on the nexus, then kick the SM */
	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}