/* bnx2fc_io.c: QLogic Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()
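
/*
 * Fast-path SCSI commands are pooled into one free list per possible CPU
 * (keyed by xid % num_possible_cpus()); the extra list at
 * RESERVE_FREE_LIST_INDEX holds the slow-path (ELS/TM) commands.
 */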
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
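
/*
 * Timeout handler, run from the interface timer workqueue. It synchronizes
 * with the completion path via tgt_lock, and every arm is responsible for
 * dropping the reference that bnx2fc_cmd_timer_set() took when the timer
 * was armed (the RRQ arm hands its hold over to bnx2fc_send_rrq()).
 */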
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->abts_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      kref_read(&io_req->refcount));
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				/*
				 * Cleanup and return original command to
				 * mid-layer.
				 */
				bnx2fc_initiate_cleanup(io_req);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req,
					      "IO completed before timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;

				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			} else {
				BNX2FC_IO_DBG(io_req,
					      "IO already in ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold.
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	/* Sanity checks before returning command to mid-layer */
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
				    "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	if (!sc_cmd->device) {
		pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid);
		return;
	}
	if (!sc_cmd->device->host) {
		pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n",
		       io_req->xid);
		return;
	}

	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}
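
/*
 * Command manager: a single allocation holding the manager itself followed
 * by an xid-indexed lookup table of outstanding commands (cmgr->cmds),
 * plus the per-CPU free lists and the pre-allocated io_bdt pool.
 */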
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->hba = hba;
	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
				  GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock),
				       GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		kfree(cmgr->free_list);
		cmgr->free_list = NULL;
		goto mem_err;
	}

	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - hba->elstm_xids;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}
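
/*
 * Allocate a slow-path (ELS/TM) command from the reserved free list.
 * Allocation fails when that list is empty, when the session already has
 * max_sqes active IOs, or when too few SQ WQEs remain free.
 */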
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	if (io_req->cmd_type != BNX2FC_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}
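
/**
 * bnx2fc_initiate_tmf - send an FCP task management request
 *
 * @sc_cmd:	SCSI command identifying the target and LUN
 * @tm_flags:	TM flags, e.g. FCP_TMF_LUN_RESET or FCP_TMF_TGT_RESET
 *
 * Builds the TM request as a middle-path (MP) request and sleeps until
 * the firmware completes it or the TM timeout expires.
 */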
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	rport = starget_to_rport(scsi_target(sc_cmd->device));
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}
	rp = rport->dd_data;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->abts_done);
	io_req->wait_for_abts_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->abts_done,
					 interface->tm_timeout * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_abts_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_cleanup_comp = 1;
		init_completion(&io_req->cleanup_done);
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->cleanup_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_cleanup_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
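
/**
 * bnx2fc_initiate_seq_cleanup - post a firmware sequence cleanup task
 *
 * @orig_io_req:	IO request whose sequence is to be cleaned up
 * @offset:		data offset at which recovery should resume
 * @r_ctl:		R_CTL of the frame that triggered the cleanup
 *
 * On completion, bnx2fc_process_seq_cleanup_compl() sends an SRR to
 * recover the exchange.
 */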
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}
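
/**
 * bnx2fc_initiate_cleanup - ask the firmware to release a task context
 *
 * @io_req:	IO request whose xid is to be cleaned up
 *
 * Called with tgt_lock held. Marks the request with
 * BNX2FC_FLAG_ISSUE_CLEANUP_REQ so the completion path knows a cleanup
 * CQE (rather than an ABTS CQE) is expected.
 */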
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Set flag that cleanup request is pending with the firmware */
	set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}
/**
 * bnx2fc_eh_target_reset: Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
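
/*
 * Issue a cleanup for an IO whose ABTS did not complete, and wait for the
 * cleanup CQE. tgt_lock is dropped across the wait and reacquired before
 * returning.
 */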
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	unsigned int time_left;
	int rc = SUCCESS;

	init_completion(&io_req->cleanup_done);
	io_req->wait_for_cleanup_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	/*
	 * Can't wait forever on cleanup response lest we let the SCSI error
	 * handler wait forever
	 */
	time_left = wait_for_completion_timeout(&io_req->cleanup_done,
						BNX2FC_FW_TIMEOUT);
	if (!time_left) {
		BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
			      __func__);

		/*
		 * Put the extra reference to the SCSI command since it would
		 * not have been returned in this case.
		 */
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_cleanup_comp = 0;
	return rc;
}
/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc;
	unsigned int time_left;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return FAILED;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      kref_read(&io_req->refcount));

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * The IO is still with the FW.
		 * Return failure and let SCSI-ml retry eh_abort.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		return FAILED;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. This is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing.
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->abts_done);
	init_completion(&io_req->cleanup_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		/*
		 * We don't want to hold off the upper layer timer so
		 * simply cleanup the command and return that I/O was
		 * successfully aborted.
		 */
		rc = bnx2fc_abts_cleanup(io_req);
		/*
		 * This only occurs when a task abort was requested while
		 * ABTS is in progress. Setting the IO_CLEANUP flag will
		 * skip the RRQ process in the case when the fw generated
		 * SCSI_CMD cmpl was a result from the ABTS request rather
		 * than the CLEANUP request.
		 */
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		goto done;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_abts_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		io_req->wait_for_cleanup_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->cleanup_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_cleanup_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
	time_left = wait_for_completion_timeout(&io_req->abts_done,
						(2 * rp->r_a_tov + 1) * HZ);
	if (!time_left)
		BNX2FC_IO_DBG(io_req,
			      "Timed out in eh_abort waiting for abts_done");

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_abts_comp = 0;
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
		rc = SUCCESS;
	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
				      &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		/*
		 * Cleanup firmware residuals before returning control back
		 * to SCSI ML.
		 */
		rc = bnx2fc_abts_cleanup(io_req);
		goto done;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}
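
/*
 * The *_compl handlers below are called from the CQE processing path
 * with tgt_lock held.
 */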
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_seq_cleanup_compl xid = 0x%x, "
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      kref_read(&io_req->refcount), io_req->cmd_type);
	/*
	 * Test whether there is a cleanup request pending. If not just
	 * ignore the completion.
	 */
	if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ,
				&io_req->req_flags))
		return;

	/*
	 * If we receive a cleanup completion for this request then the
	 * firmware will not give us an abort completion for this request
	 * so clear any ABTS pending flags.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) &&
	    !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
		set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags);
		if (io_req->wait_for_abts_comp)
			complete(&io_req->abts_done);
	}

	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_cleanup_comp)
		complete(&io_req->cleanup_done);
}
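
/*
 * ABTS completion: on BA_ACC the OX_ID must not be reused until an RRQ has
 * been sent after R_A_TOV, so the command is parked on the timer; on BA_RJT
 * (or an unknown response) the OX_ID is retired after R_A_TOV without an RRQ.
 */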
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      kref_read(&io_req->refcount), io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io - 0x%x\n", io_req->xid);
		return;
	}

	/*
	 * If we receive an ABTS completion here then we will not receive
	 * a cleanup completion so clear any cleanup pending flags.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
		if (io_req->wait_for_cleanup_comp)
			complete(&io_req->cleanup_done);
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response.
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_abts_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->abts_done);
	} else {
		/*
		 * We end up here when ABTS is issued in asynchronous
		 * context, i.e. as part of task management completion,
		 * when an FW error is received, or when the ABTS is
		 * issued because the IO timed out.
		 */
		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	u64 tm_lun = sc_cmd->device->lun;
	u64 lun;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT the IO
	 * that matches the LUN that was reset.
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&cmd->timeout_work))
					kref_put(&cmd->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT each IO,
	 * as the whole target was reset.
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&cmd->timeout_work))
				kref_put(&cmd->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {
		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_abts_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->abts_done);
	}
}
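
/*
 * Split a scatterlist element that exceeds BNX2FC_MAX_BD_LEN into
 * BNX2FC_BD_SPLIT_SZ sized BDs; returns the number of BD entries used.
 */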
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD);
	/*
	 * Use dma_map_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
		       "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
		       io_req->xid);
	return bd_count;
}
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	/*
	 * Return the command to ML if BD count exceeds the max number
	 * that can be handled by FW.
	 */
	if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) {
		pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n",
		       bd_count, io_req->xid);
		return -ENOMEM;
	}

	return 0;
}
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	/*
	 * Use dma_unmap_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
			     scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
}
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {
		/*
		 * We do not anticipate num_rq > 1, as the linux defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO.
		 * 256 bytes of single rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len = fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
			       rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n",
				      io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}
/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 */
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (tgt->retry_delay_timestamp) {
		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
			tgt->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	spin_lock_bh(&tgt->tgt_lock);

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}

exit_qcmd_tgtlock:
	spin_unlock_bh(&tgt->tgt_lock);
exit_qcmd:
	return rc;
}
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
		return;
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_abts_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->abts_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Set the jiffies + retry_delay_timer * 100ms
				 * for the rport/tgt.
				 */
				tgt->retry_delay_timestamp = jiffies +
					fcp_rsp->retry_delay_timer * HZ / 10;
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
		       io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
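
/**
 * bnx2fc_post_io_req - post a SCSI command to the firmware
 *
 * @tgt:	offloaded rport the command is directed to
 * @io_req:	command allocated by bnx2fc_cmd_alloc()
 *
 * Called with tgt_lock held. Maps the scatterlist into BDs, initializes
 * the task context, arms the optional IO timer and rings the doorbell.
 * Returns -EAGAIN on failure so the caller can flow-control the midlayer.
 */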
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* bnx2fc_post_io_req() is called with the tgt_lock held */

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */
	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	return 0;
}