/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}
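/*
 * Delayed-work handler invoked when an outstanding ABTS, ELS or sequence
 * cleanup request does not complete within the interval armed by
 * qedf_cmd_timer_set().  Each cmd_type below gets its own recovery action.
 */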
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct qedf_rport *fcport = io_req->fcport;
	u8 op = 0;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ.
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}
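/*
 * Tear down the command manager built by qedf_cmd_mgr_alloc(): free the
 * per-xid DMA BD tables and the io_bdt pool, then each command's sense
 * buffer and firmware task parameters, cancelling any pending RRQ work
 * along the way.
 */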
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}
static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);
}
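/*
 * Allocate the global command manager: one qedf_ioreq per task ID (xid) in
 * [QEDF_MIN_XID, FCOE_PARAMS_NUM_TASKS - 1], each with a DMA-able sense
 * buffer, firmware task parameters and S/G list parameters, plus an io_bdt
 * descriptor table per xid.  Any allocation failure unwinds everything via
 * qedf_cmd_mgr_free().
 */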
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
		    "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
	    "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	xid = QEDF_MIN_XID;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
			    "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
			    "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}
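/*
 * Allocate a free qedf_ioreq for a new command of the given type.  The pool
 * is scanned round-robin from cmd_mgr->idx under cmd_mgr->lock; a slot is
 * free when QEDF_CMD_OUTSTANDING is clear.  Allocation is throttled by the
 * per-connection SQE and active-I/O limits and by a global reserve of
 * GBL_RSVD_TASKS task IDs kept for control commands.
 */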
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No RX_ID assigned yet */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;
	return NULL;
}
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}
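/*
 * Split one DMA-mapped S/G element that is larger than QEDF_MAX_BD_LEN into
 * multiple hardware SGEs of at most QEDF_BD_SPLIT_SZ bytes each, starting at
 * bd_index in the BD table.  Returns the number of SGEs written.  As a
 * worked example, with a 64KB split size a 150KB element would be emitted
 * as three SGEs of 64KB + 64KB + 22KB.
 */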
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
	int bd_index)
{
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len > QEDF_BD_SPLIT_SZ)
			frag_size = QEDF_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;

		addr += (u64)frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr, end_addr;
	int i;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);

	sg = scsi_sglist(sc);

	/*
	 * New condition to send single SGE as cached-SGL with length less
	 * than 64k.
	 */
	if ((sg_count == 1) && (sg_dma_len(sg) <=
	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].sge_len = (u16)sg_len;

		return ++bd_count;
	}

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * First s/g element in the list so check if the end_addr
		 * is page aligned. Also check to make sure the length is
		 * at least page size.
		 */
		if ((i == 0) && (sg_count > 1) &&
		    ((end_addr % QEDF_PAGE_SIZE) ||
		    sg_len < QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Last s/g element so check if the start address is page
		 * aligned.
		 */
		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
		    (addr % QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Intermediate s/g element so check if start and end address
		 * is page aligned.
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
			io_req->use_slowpath = true;

		if (sg_len > QEDF_MAX_BD_LEN) {
			sg_frags = qedf_split_bd(io_req, addr, sg_len,
			    bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].sge_addr.lo = U64_LO(addr);
			bd[bd_count].sge_addr.hi = U64_HI(addr);
			bd[bd_count].sge_len = (uint16_t)sg_len;
		}

		bd_count += sg_frags;
		byte_count += sg_len;
	}

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
		    "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
		    scsi_bufflen(sc), io_req->xid);

	return bd_count;
}
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}
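/*
 * Build the 32-byte FCP_CMND IU handed to the firmware: 8 bytes of LUN,
 * 4 bytes of flags (task attributes, TM flags and the read/write bits),
 * 16 bytes of CDB, and the 4-byte big-endian FCP_DL data length.
 */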
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
	    (struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}
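/*
 * Initialize the firmware task context for a SCSI read/write (or task
 * management) request: choose the task type from the DMA direction, point
 * the task at this command's SGL and sense buffer, select a completion
 * queue based on the submitting CPU, and hand the byte-swapped FCP_CMND IU
 * to init_initiator_rw_fcoe_task().
 */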
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
		    U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
		    U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		io_req->sgl_task_params->small_mid_sge =
		    io_req->use_slowpath;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (bd_count == 1) {
		qedf->single_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
	} else if (io_req->use_slowpath) {
		qedf->slow_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
	} else {
		qedf->fast_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
	}
}
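/*
 * Initialize a middle path (ELS or task management) task.  MP requests use
 * the preallocated single-SGE request/response buffers set up by
 * qedf_init_mp_req() and always complete on CQ 0.
 */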
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "Initializing MP task for cmd_type=%d\n",
	    io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Response buffer is a single page (see qedf_init_mp_req) */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);

	/* Midpath requests always consume 1 SGE */
	qedf->single_sge_ios++;
}
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}
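/*
 * Notify the firmware that new SQEs have been placed on the connection's
 * send queue by writing the updated producer index to its doorbell
 * register.  The single 32-bit doorbell write carries both the
 * destination/aggregation command flags and the new fw_sq_prod_idx value.
 */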
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/* Make sure SQ index is updated so f/w processes requests in order */
	wmb();
}
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
	int direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = 0;
		io_log->rsp_cpu = 0;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}
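/*
 * Post an already-allocated io_req to the firmware: build the BD list from
 * the scatter/gather list, claim an SQE on the connection's send queue,
 * initialize the task context and ring the doorbell.  Called with
 * fcport->rport_lock held by qedf_queuecommand().
 */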
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->use_slowpath = false; /* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
		    xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return 0;
}
static int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = rport->dd_data;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

exit_qcmd:
	return rc;
}
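/*
 * Parse the FCP_RSP IU from a completion: record residual counts and SCSI
 * status, extract the FCP response code (meaningful for task management
 * responses), and copy any sense data into the midlayer's sense buffer,
 * truncating it to SCSI_SENSE_BUFFERSIZE if needed.
 */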
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (fcp_sns_len)
		memcpy(sc_cmd->sense_buffer, sense_data,
		    fcp_sns_len);
}
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}
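/*
 * Completion handler for normal SCSI command CQEs.  Validates that the
 * command is still owned by the driver, parses the FCP_RSP, handles a
 * firmware-detected underrun by aborting the exchange, and translates the
 * FCP status (including SAM_STAT_TASK_SET_FULL/BUSY retry-delay hints) into
 * a midlayer result before calling scsi_done().
 */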
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid;
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;
	int rval;

	if (!io_req)
		return;
	if (!cqe)
		return;

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->special) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
		    "request not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
		if (rval) {
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		}

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */

				/* Upper 2 bits */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				/* Lower 14 bits */
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
		    io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req)
		return;

	sc_cmd = io_req->sc_cmd;
	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data.
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * expiration.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}
/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}
static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els callback.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}
/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;

	if (!fcport)
		return;

	qedf = fcport->qedf;
	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (io_req->fcport != fcport)
			continue;
		if (io_req->cmd_type == QEDF_ELS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for io_req=0x%p.\n",
				    io_req);
				continue;
			}
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (lun > 0) {
			if (io_req->sc_cmd->device->lun !=
			    (u64)lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path.
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p\n", io_req);
			continue;
		}
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);
	}
}
/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata = fcport->rdata;
	struct qedf_ctx *qedf = fcport->qedf;
	u16 xid;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
		rc = 1;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto abts_err;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto abts_err;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto abts_err;
	}

	kref_get(&io_req->refcount);

	xid = io_req->xid;
	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
	    "0x%x\n", xid);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	return rc;

abts_err:
	/*
	 * If the ABTS task fails to queue then we need to cleanup the
	 * task at the firmware.
	 */
	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
	return rc;
}
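/*
 * ABTS completion handler.  On BA_ACC the exchange must stay allocated
 * until an RRQ has been sent after R_A_TOV, so an extra reference is held
 * and rrq_work is scheduled; on BA_RJT (or an unknown R_CTL) the abort is
 * marked failed and the command is returned to the midlayer immediately.
 */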
void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	uint16_t xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
	    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	cancel_delayed_work(&io_req->timeout_work);

	xid = io_req->xid;
	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response.
		 */
		kref_get(&io_req->refcount);
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}
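/*
 * Allocate the DMA resources for a middle path request: one page each for
 * the request and response payloads plus a single-entry BD table for each,
 * all freed again by qedf_free_mp_resc() on error or release.
 */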
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
		    "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table.
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}
/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
		    "cleanup processing or already completed.\n",
		    io_req->xid);
		return SUCCESS;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
	    io_req->xid);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
		    "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
	    io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->tm_done);
}
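/*
 * Issue a task management function (e.g. a LUN reset) and synchronously
 * wait up to QEDF_TM_TIMEOUT seconds for the response.  Success is judged
 * by the FCP response code, and any outstanding I/O affected by the TMF is
 * flushed before SUCCESS or FAILED is returned to the eh handlers.
 */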
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
	    "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = -EAGAIN;
		goto reset_tmf_err;
	}

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = true;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
	    "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
	else
		qedf_flush_active_ios(fcport, -1);

	kref_put(&io_req->refcount, qedf_release_cmd);

	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
reset_tmf_err:
	return rc;
}
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;

	rval = fc_remote_port_chkready(rport);

	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (fcport == NULL) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	return rc;
}
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
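/*
 * Handle an unsolicited frame delivered through the buffer descriptor queue
 * (BDQ): copy the frame out of the BDQ buffer into a libfc fc_frame, queue
 * it to qedf_io_wq so libfc sees it in a non-atomic context, and always
 * advance (and write back) the BDQ producer index so the firmware can reuse
 * the buffer.
 */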
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
	    qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}