/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
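
/*
 * Worked example for the IOCB math above: a 32-bit command IOCB carries
 * 3 DSDs and each Continuation Type 0 IOCB carries 7 more, so 11 DSDs
 * need 1 + (8 / 7) + 1 = 3 entries.  A 64-bit command IOCB carries 2
 * DSDs and each Continuation Type 1 IOCB carries 5, so 12 DSDs need
 * 1 + (10 / 5) = 3 entries.
 */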
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
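
/*
 * Note the asymmetry above: the STRIP/INSERT modes have the HBA firmware
 * add or remove the 8-byte DIF tuple while the host buffer stays
 * unprotected, whereas the PASS modes carry protection data end to end.
 * The return value (protection scatter/gather count) is what the callers
 * use to size the protection DSD lists.
 */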
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
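
/*
 * A 64-bit DSD is a three-word (address-low, address-high, length) tuple,
 * which is why the 64-bit loaders above and below emit LSD()/MSD() pairs
 * while the 32-bit variant stores a single address word per segment.
 */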
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	} else {
		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
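
/*
 * The (req_cnt + 2) headroom check used by the start_scsi routines keeps
 * two request-queue entries permanently free, so the ring-in index can
 * never catch up with the ring-out index and make a completely full ring
 * indistinguishable from an empty one.
 */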
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
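
/*
 * A marker IOCB tells the firmware to resynchronize its command stream
 * for one LUN, one target, or the whole adapter (MK_SYNC_ALL) after a
 * reset event; the start_scsi paths issue it lazily via vha->marker_needed
 * before queuing the next command.
 */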
static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		__constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
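
/*
 * Layout note for the Type 6 path above: the IOCB itself carries only a
 * pointer to an external DSD list.  Each list holds QLA_DSDS_PER_IOCB
 * data descriptors plus one extra slot, used either to chain to the next
 * list or for the all-zero terminator written above.
 */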
/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
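
/*
 * This is just a ceiling division: with QLA_DSDS_PER_IOCB descriptors per
 * list, e.g. dsds = 2 * QLA_DSDS_PER_IOCB + 1 needs three lists.
 */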
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
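
/*
 * Summary of the masks set above: a 0xff mask byte tells the firmware to
 * check (or replace) that byte of the tag, 0x00 ignores it.  Type 3 has
 * no usable ref tag, so its mask is cleared entirely and only the 16-bit
 * guard is verified.
 */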
struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT */
	uint32_t	dma_len;	/* OUT */

	uint32_t	tot_bytes;	/* IN */
	struct scatterlist *cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t	bytes_consumed;
	uint32_t	num_bytes;
	uint32_t	tot_partial;

	/* for debugging */
	uint32_t	num_sg;
	srb_t		*sp;
};
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
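
/*
 * The helper above carves the data scatterlist into protection-interval
 * sized chunks (blk_sz, normally one logical block), carrying partial
 * byte counts across SG element boundaries, so the DIF walker below can
 * pair every full interval with its 8-byte protection tuple.
 */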
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx	sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;
	struct scsi_qla_host *vha;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		vha = sp->fcport->vha;
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		vha = tc->vha;
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
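
/*
 * Chaining convention shared by the walker above and the two below: when
 * a DSD list fills up, the last three words written into the previous
 * list are not a data segment but a link (address-low, address-high,
 * length) to the freshly allocated next list, and the final list is
 * closed with an all-zero terminator.
 */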
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		sgl = tc->sg;
		vha = tc->vha;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t	*cur_dsd, *fcp_dl;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	total_bytes = 0;
	uint32_t	data_bytes;
	uint32_t	dif_bytes;
	uint8_t		bundling = 1;
	uint16_t	blk_size;
	uint8_t		*clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t		additional_fcpcdb_len;
	uint16_t	fcp_cmnd_len;
	struct fcp_cmnd	*fcp_cmnd;
	dma_addr_t	crc_ctx_dma;
	char		tag[2];

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = TSK_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = TSK_SIMPLE;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
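
/*
 * fcp_dl above is the FCP_DL (data length) field that follows the CDB in
 * the FCP_CMND payload; it travels on the wire in big-endian form, hence
 * htonl(), while the surrounding IOCB fields stay little-endian for the
 * firmware.
 */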
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		default:
			cmd_pkt->task = TSK_SIMPLE;
			break;
		}
	} else {
		cmd_pkt->task = TSK_SIMPLE;
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
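
/*
 * For READ_INSERT/WRITE_STRIP the host buffer carries no interleaved DIF
 * tuples, so the segment counts above are recomputed in logical-block
 * units (via qla24xx_get_one_block_sg) rather than taken directly from
 * dma_map_sg().
 */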
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
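
/*
 * Response queue 0 is the default/fallback; with CPU affinity enabled the
 * remaining queues are indexed by the submitting CPU, so completions land
 * on (or near) the CPU that issued the I/O.
 */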
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

queuing_error:
	return pkt;
}
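
/*
 * Handle 0 is never assigned: the search above starts at index 1 and
 * wraps back to 1, so a zero handle can safely mean "no command" on the
 * completion side, and IOCBs allocated without an sp keep handle 0.
 */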
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
			sizeof(tsk->lun));
	}
}
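
/*
 * The task-management timeout above comes from the fabric
 * resource-allocation timeout: r_a_tov is kept in tenths of a second,
 * so r_a_tov / 10 * 2 yields the conventional 2 * R_A_TOV recovery
 * window in seconds.
 */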
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->fcport->vha->qla_stats.control_requests++;
}
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t       sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->fcport->vha->qla_stats.control_requests++;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t       sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
2276 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2277 * @sp: command to send to the ISP
2279 * Returns non-zero if a failure occurred, else zero.
2282 qla82xx_start_scsi(srb_t *sp)
2285 unsigned long flags;
2286 struct scsi_cmnd *cmd;
2293 struct device_reg_82xx __iomem *reg;
2296 uint8_t additional_cdb_len;
2297 struct ct6_dsd *ctx;
2298 struct scsi_qla_host *vha = sp->fcport->vha;
2299 struct qla_hw_data *ha = vha->hw;
2300 struct req_que *req = NULL;
2301 struct rsp_que *rsp = NULL;
2304 /* Setup device pointers. */
2306 reg = &ha->iobase->isp82;
2307 cmd = GET_CMD_SP(sp);
2309 rsp = ha->rsp_q_map[0];
2311 /* So we know we haven't pci_map'ed anything yet */
2314 dbval = 0x04 | (ha->portnum << 5);
	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}
	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);
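
	/*
	 * The search below treats outstanding_cmds[] as a circular array:
	 * it begins just past the most recently issued handle, wraps from
	 * the top back to 1 (slot 0 is never handed out), and stops at the
	 * first free slot. If none is free, the request is failed via
	 * queuing_error.
	 */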
	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d exceeds %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;
		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}
sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}
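
		/*
		 * The free-space math above treats the request ring as
		 * circular. For example, with a 1024-entry ring, ring_index
		 * of 1000 and a hardware out-pointer (cnt) of 10, the
		 * producer has wrapped: 1024 - (1000 - 10) = 34 entries
		 * remain. The "+ 2" keeps a small safety margin so the
		 * ring never fills completely.
		 */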
		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}
		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;
		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}
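
		/*
		 * fcp_cmnd_len covers the FCP_CMND IU: a 12-byte fixed part
		 * (8-byte LUN plus the CRN, task-attribute, task-management
		 * and additional-CDB-length bytes), the CDB itself, and the
		 * trailing 4-byte FCP_DL data-length field filled in below
		 * through fcp_dl.
		 */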
		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;
		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU. */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;
		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}
		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));
		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}
		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	/* Set chip new ring index. */
	/* Write, read and verify logic. */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}
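
	/*
	 * Doorbell write-and-verify: the new ring index is posted, read
	 * back through nxdb_rd_ptr, and re-posted until the readback
	 * matches, so a dropped doorbell write cannot strand the request.
	 * When the ql2xdbwr module parameter selects the alternate posting
	 * scheme, a single qla82xx_wr_32() write is used instead.
	 */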
	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
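
/**
 * qla24xx_abort_iocb() - Build an Abort IOCB.
 * @sp: SRB for the abort request
 * @abt_iocb: abort IOCB packet to populate
 *
 * The packet carries two handles, both tagged with the request queue id:
 * its own (handle) and that of the command to be aborted
 * (handle_to_abort).
 */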
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware. */
	wmb();
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}
	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	default:
		break;
	}
	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
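
/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: SRB carrying the bsg_job request
 * @vha: SCSI host
 * @cmd_pkt: bidirectional IOCB packet to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Write (request) DSDs are loaded first; read (reply) DSDs follow in the
 * same or in subsequent Continuation Type 1 IOCBs.
 */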
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	/* Update entry type to indicate bidir command. */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
	/* Set the transfer direction; in this setup both flags are set.
	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;
	/* Only one dsd is available for a bidirectional IOCB; remaining
	 * dsds are bundled in continuation iocbs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * five DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* For a read request the DSDs always go to a continuation IOCB
	 * and follow the write DSDs. If there is room on the current IOCB
	 * then they are added to that IOCB, else a new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * five DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
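
	/*
	 * Worked example: with one write DSD and five read DSDs, the write
	 * DSD takes the single slot in the command IOCB, the five read DSDs
	 * fill one continuation IOCB, and entry_count below becomes 2.
	 */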
	/* This value should be the same as the number of IOCBs required
	 * for this cmd.
	 */
	cmd_pkt->entry_count = entry_count;
}
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;
	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}
	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
	/* Calculate number of IOCBs required. */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha). */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;
	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware. */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}