/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_cb_ioim_macros.h>

BFA_TRC_FILE(HAL, TSKIM);

/**
 * task management completion handling
 */
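/*
 * bfa_tskim_qcomp() queues __cbfn as a completion callback for the TM
 * command and then notifies the owning itnim; bfa_tskim_notify_comp()
 * performs the itnim notification only when tskim->notify is set.
 */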
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe,		\
			__cbfn, (__tskim));				\
	bfa_tskim_notify_comp(__tskim);					\
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)

/*
 * forward declarations
 */
static void	__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void	__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
					lun_t lun);
static void	bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void	bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void	bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void	bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/**
 * bfa_tskim_sm
 */

enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};

static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
					enum bfa_tskim_event event);

/**
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/**
		 * If device is offline, do not send TM on wire. Just clean up
		 * any pending IO requests and complete the TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					&tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 * TM command is active, awaiting completion from firmware to
 * clean up IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/**
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

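/**
 * IO requests in TM command scope are being cleaned up; awaiting the
 * IO cleanup completion before reporting the TM command done.
 */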
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/**
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 * Task management command is active, awaiting room in request CQ
 * to send the cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/**
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/**
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}


/**
 * bfa_tskim_private
 */

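/*
 * BFA callback for a completed TM command: report the firmware status to
 * the driver. If invoked without completion, only advance the state machine.
 */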
static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}

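/*
 * BFA callback for a failed TM command: report BFI_TSKIM_STS_FAILED to
 * the driver. If invoked without completion, only advance the state machine.
 */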
static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
				BFI_TSKIM_STS_FAILED);
}

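/*
 * Check whether an IO falls within the scope of the TM command: a target
 * reset covers every LUN, while the LUN-scoped TM commands match only IOs
 * on the same LUN.
 */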
static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return (tskim->lun == lun);

	default:
		bfa_assert(0);
	}

	return BFA_FALSE;
}

/**
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	INIT_LIST_HEAD(&tskim->io_q);

	/**
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/**
	 * Fail back any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		if (bfa_tskim_match_scope
			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}

/**
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}

/**
 * Cleanup all IO requests associated with this task management command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}

/**
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/**
 * Send an abort request to firmware to clean up an active TM command.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}

/**
 * Resume a task management command that was waiting for room in the
 * request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_fcpim_stats(tskim->fcpim, qresumes);
	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/**
 * Cleanup IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}


/**
 * bfa_tskim_friend
 */

/**
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/**
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/**
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/**
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_tskim_s *tskim;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);

	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
				tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_meminfo_kva(minfo) = (u8 *) tskim;
}

void
bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
{
	/**
	 * @todo
	 */
}

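/**
 * Handle task management response messages from firmware.
 */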
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/**
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}


/**
 * bfa_tskim_api
 */

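/**
 * Allocate a TM command instance from the free pool; returns NULL (and
 * counts a no_tskims event) when the pool is exhausted.
 */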
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (!tskim)
		bfa_fcpim_stats(fcpim, no_tskims);
	else
		tskim->dtsk = dtsk;

	return tskim;
}

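/**
 * Return a TM command to the free pool. The command must still be linked
 * on its itnim task queue when freed.
 */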
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/**
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}