2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
13 #include <scsi/scsi_tcq.h>
16 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
19 * Returns the proper CF_* direction based on CDB.
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26 struct scsi_qla_host *vha = sp->vha;
30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34 vha->qla_stats.output_requests++;
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 vha->qla_stats.input_requests++;
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
47 * @dsds: number of data segment descriptors needed
49 * Returns the number of IOCB entries needed to store @dsds.
52 qla2x00_calc_iocbs_32(uint16_t dsds)
58 iocbs += (dsds - 3) / 7;
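/*
 * Worked example (illustrative, not driver code): a Command Type 2 IOCB
 * carries 3 DSDs and each Continuation Type 0 IOCB carries 7 more, so
 * the full computation (whose remainder check and return are elided in
 * this listing) is 1 + ceil((dsds - 3) / 7):
 *
 *	uint16_t iocbs = 1;
 *
 *	if (dsds > 3) {
 *		iocbs += (dsds - 3) / 7;
 *		if ((dsds - 3) % 7)
 *			iocbs++;
 *	}
 *
 * e.g. dsds = 17 -> 3 IOCBs, dsds = 18 -> 4 IOCBs.
 */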
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
69 * @dsds: number of data segment descriptors needed
71 * Returns the number of IOCB entries needed to store @dsds.
74 qla2x00_calc_iocbs_64(uint16_t dsds)
80 iocbs += (dsds - 2) / 5;
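/*
 * The 64-bit variant follows the same shape with smaller capacities: a
 * Command Type 3 IOCB holds 2 DSDs and each Continuation Type 1 IOCB
 * holds 5, so iocbs = 1 + ceil((dsds - 2) / 5); e.g. dsds = 12 -> 3
 * IOCBs, dsds = 13 -> 4 IOCBs.
 */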
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96 cont_entry_t *cont_pkt;
97 struct req_que *req = vha->req;
98 /* Adjust ring index. */
100 if (req->ring_index == req->length) {
102 req->ring_ptr = req->ring;
107 cont_pkt = (cont_entry_t *)req->ring_ptr;
109 /* Load packet defaults. */
110 put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
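/*
 * The request ring is a fixed-size circular buffer; a minimal sketch of
 * the advance-and-wrap step performed above (the index increment and
 * the else branch are elided in this listing; the Type 1 helper below
 * does the same dance):
 *
 *	if (++req->ring_index == req->length) {
 *		req->ring_index = 0;
 *		req->ring_ptr = req->ring;	// wrap to the ring base
 *	} else {
 *		req->ring_ptr++;		// advance to the next slot
 *	}
 */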
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118 * @req: request queue
120 * Returns a pointer to the continuation type 1 IOCB packet.
122 static inline cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
125 cont_a64_entry_t *cont_pkt;
127 /* Adjust ring index. */
129 if (req->ring_index == req->length) {
131 req->ring_ptr = req->ring;
136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138 /* Load packet defaults. */
139 put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
140 CONTINUE_A64_TYPE, &cont_pkt->entry_type);
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149 uint8_t guard = scsi_host_get_guard(cmd->device->host);
151 /* We always use DIF bundling for best performance */
154 /* Translate SCSI opcode to a protection opcode */
155 switch (scsi_get_prot_op(cmd)) {
156 case SCSI_PROT_READ_STRIP:
157 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
159 case SCSI_PROT_WRITE_INSERT:
160 *fw_prot_opts |= PO_MODE_DIF_INSERT;
162 case SCSI_PROT_READ_INSERT:
163 *fw_prot_opts |= PO_MODE_DIF_INSERT;
165 case SCSI_PROT_WRITE_STRIP:
166 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
168 case SCSI_PROT_READ_PASS:
169 case SCSI_PROT_WRITE_PASS:
170 if (guard & SHOST_DIX_GUARD_IP)
171 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
173 *fw_prot_opts |= PO_MODE_DIF_PASS;
175 default: /* Normal Request */
176 *fw_prot_opts |= PO_MODE_DIF_PASS;
180 return scsi_prot_sg_count(cmd);
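/*
 * Usage sketch, mirroring the DIF start-I/O paths later in this file:
 * the return value doubles as the protection-segment count, so a zero
 * result means there is no separate protection scatter list to map:
 *
 *	uint16_t fw_prot_opts = 0;
 *
 *	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts))
 *		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
 *		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
 */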
184 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185 * capable IOCB types.
187 * @sp: SRB command to process
188 * @cmd_pkt: Command type 2 IOCB
189 * @tot_dsds: Total number of segments to transfer
191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
195 struct dsd32 *cur_dsd;
196 scsi_qla_host_t *vha;
197 struct scsi_cmnd *cmd;
198 struct scatterlist *sg;
201 cmd = GET_CMD_SP(sp);
203 /* Update entry type to indicate Command Type 2 IOCB */
204 put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
206 /* No data transfer */
207 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
208 cmd_pkt->byte_count = cpu_to_le32(0);
213 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
215 /* Three DSDs are available in the Command Type 2 IOCB */
216 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
217 cur_dsd = cmd_pkt->dsd32;
219 /* Load data segments */
220 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
221 cont_entry_t *cont_pkt;
223 /* Allocate additional continuation packets? */
224 if (avail_dsds == 0) {
226 * Seven DSDs are available in the Continuation
229 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
230 cur_dsd = cont_pkt->dsd;
231 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
234 append_dsd32(&cur_dsd, sg);
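/*
 * Worked example for the loop above: with tot_dsds = 12, the first 3
 * segments land in the Command Type 2 IOCB itself and the remaining 9
 * spill into two Continuation Type 0 IOCBs (7 + 2), matching
 * qla2x00_calc_iocbs_32(12) = 3 entries.  The 64-bit builder below
 * follows the same pattern with capacities of 2 and 5.
 */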
240 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
241 * capable IOCB types.
243 * @sp: SRB command to process
244 * @cmd_pkt: Command type 3 IOCB
245 * @tot_dsds: Total number of segments to transfer
247 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
251 struct dsd64 *cur_dsd;
252 scsi_qla_host_t *vha;
253 struct scsi_cmnd *cmd;
254 struct scatterlist *sg;
257 cmd = GET_CMD_SP(sp);
259 /* Update entry type to indicate Command Type 3 IOCB */
260 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
262 /* No data transfer */
263 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
264 cmd_pkt->byte_count = cpu_to_le32(0);
269 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
271 /* Two DSDs are available in the Command Type 3 IOCB */
272 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
273 cur_dsd = cmd_pkt->dsd64;
275 /* Load data segments */
276 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
277 cont_a64_entry_t *cont_pkt;
279 /* Allocate additional continuation packets? */
280 if (avail_dsds == 0) {
282 * Five DSDs are available in the Continuation
285 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
286 cur_dsd = cont_pkt->dsd;
287 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
290 append_dsd64(&cur_dsd, sg);
296 * qla2x00_start_scsi() - Send a SCSI command to the ISP
297 * @sp: command to send to the ISP
299 * Returns non-zero if a failure occurred, else zero.
302 qla2x00_start_scsi(srb_t *sp)
306 scsi_qla_host_t *vha;
307 struct scsi_cmnd *cmd;
311 cmd_entry_t *cmd_pkt;
315 struct device_reg_2xxx __iomem *reg;
316 struct qla_hw_data *ha;
320 /* Setup device pointers. */
323 reg = &ha->iobase->isp;
324 cmd = GET_CMD_SP(sp);
325 req = ha->req_q_map[0];
326 rsp = ha->rsp_q_map[0];
327 /* So we know we haven't pci_map'ed anything yet */
330 /* Send marker if required */
331 if (vha->marker_needed != 0) {
332 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
334 return (QLA_FUNCTION_FAILED);
336 vha->marker_needed = 0;
339 /* Acquire ring specific lock */
340 spin_lock_irqsave(&ha->hardware_lock, flags);
342 /* Check for room in outstanding command list. */
343 handle = req->current_outstanding_cmd;
344 for (index = 1; index < req->num_outstanding_cmds; index++) {
346 if (handle == req->num_outstanding_cmds)
348 if (!req->outstanding_cmds[handle])
351 if (index == req->num_outstanding_cmds)
354 /* Map the sg table so we have an accurate count of sg entries needed */
355 if (scsi_sg_count(cmd)) {
356 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
357 scsi_sg_count(cmd), cmd->sc_data_direction);
365 /* Calculate the number of request entries needed. */
366 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
367 if (req->cnt < (req_cnt + 2)) {
368 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
369 if (req->ring_index < cnt)
370 req->cnt = cnt - req->ring_index;
372 req->cnt = req->length -
373 (req->ring_index - cnt);
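/*
 * Worked example of the free-slot math above, assuming a 128-entry
 * ring: with ring_index = 120 and firmware out-pointer cnt = 8 the
 * in-pointer leads, so req->cnt = 128 - (120 - 8) = 16 free slots;
 * with cnt = 124 the producer has wrapped and trails the consumer,
 * giving req->cnt = 124 - 120 = 4.  Testing against req_cnt + 2 keeps
 * slack so a completely full ring is never mistaken for an empty one.
 */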
374 /* If still no head room then bail out */
375 if (req->cnt < (req_cnt + 2))
379 /* Build command packet */
380 req->current_outstanding_cmd = handle;
381 req->outstanding_cmds[handle] = sp;
383 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
386 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
387 cmd_pkt->handle = handle;
388 /* Zero out remaining portion of packet. */
389 clr_ptr = (uint32_t *)cmd_pkt + 2;
390 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
391 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
393 /* Set target ID and LUN number */
394 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
395 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
396 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
398 /* Load SCSI command packet. */
399 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
400 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
402 /* Build IOCB segments */
403 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
405 /* Set total data segment count. */
406 cmd_pkt->entry_count = (uint8_t)req_cnt;
409 /* Adjust ring index. */
411 if (req->ring_index == req->length) {
413 req->ring_ptr = req->ring;
417 sp->flags |= SRB_DMA_VALID;
419 /* Set chip new ring index. */
420 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
421 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
423 /* Manage unprocessed RIO/ZIO commands in response queue. */
424 if (vha->flags.process_response_queue &&
425 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
426 qla2x00_process_response_queue(rsp);
428 spin_unlock_irqrestore(&ha->hardware_lock, flags);
429 return (QLA_SUCCESS);
435 spin_unlock_irqrestore(&ha->hardware_lock, flags);
437 return (QLA_FUNCTION_FAILED);
441 * qla2x00_start_iocbs() - Execute the IOCB command
443 * @req: request queue
446 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
448 struct qla_hw_data *ha = vha->hw;
449 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
451 if (IS_P3P_TYPE(ha)) {
452 qla82xx_start_iocbs(vha);
454 /* Adjust ring index. */
456 if (req->ring_index == req->length) {
458 req->ring_ptr = req->ring;
462 /* Set chip new ring index. */
463 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
464 WRT_REG_DWORD(req->req_q_in, req->ring_index);
465 } else if (IS_QLA83XX(ha)) {
466 WRT_REG_DWORD(req->req_q_in, req->ring_index);
467 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
468 } else if (IS_QLAFX00(ha)) {
469 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
470 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
471 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
472 } else if (IS_FWI2_CAPABLE(ha)) {
473 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
474 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
476 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
478 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
484 * qla2x00_marker() - Send a marker IOCB to the firmware.
486 * @qpair: queue pair pointer
489 * @type: marker modifier
491 * Can be called from both normal and interrupt context.
493 * Returns non-zero if a failure occurred, else zero.
496 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
497 uint16_t loop_id, uint64_t lun, uint8_t type)
500 struct mrk_entry_24xx *mrk24 = NULL;
501 struct req_que *req = qpair->req;
502 struct qla_hw_data *ha = vha->hw;
503 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
505 mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
507 ql_log(ql_log_warn, base_vha, 0x3026,
508 "Failed to allocate Marker IOCB.\n");
510 return (QLA_FUNCTION_FAILED);
513 mrk->entry_type = MARKER_TYPE;
514 mrk->modifier = type;
515 if (type != MK_SYNC_ALL) {
516 if (IS_FWI2_CAPABLE(ha)) {
517 mrk24 = (struct mrk_entry_24xx *) mrk;
518 mrk24->nport_handle = cpu_to_le16(loop_id);
519 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
520 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
521 mrk24->vp_index = vha->vp_idx;
522 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
524 SET_TARGET_ID(ha, mrk->target, loop_id);
525 mrk->lun = cpu_to_le16((uint16_t)lun);
530 qla2x00_start_iocbs(vha, req);
532 return (QLA_SUCCESS);
536 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
537 uint16_t loop_id, uint64_t lun, uint8_t type)
540 unsigned long flags = 0;
542 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
543 ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
544 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
550 * qla2x00_issue_marker
553 * Caller CAN have hardware lock held as specified by ha_locked parameter.
554 * Might release it, then reacquire.
556 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
559 if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
560 MK_SYNC_ALL) != QLA_SUCCESS)
561 return QLA_FUNCTION_FAILED;
563 if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
564 MK_SYNC_ALL) != QLA_SUCCESS)
565 return QLA_FUNCTION_FAILED;
567 vha->marker_needed = 0;
573 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
576 struct dsd64 *cur_dsd = NULL, *next_dsd;
577 scsi_qla_host_t *vha;
578 struct qla_hw_data *ha;
579 struct scsi_cmnd *cmd;
580 struct scatterlist *cur_seg;
582 uint8_t first_iocb = 1;
583 uint32_t dsd_list_len;
584 struct dsd_dma *dsd_ptr;
587 cmd = GET_CMD_SP(sp);
589 /* Update entry type to indicate Command Type 6 IOCB */
590 put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
592 /* No data transfer */
593 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
594 cmd_pkt->byte_count = cpu_to_le32(0);
601 /* Set transfer direction */
602 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
603 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
604 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
605 vha->qla_stats.output_requests++;
606 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
607 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
608 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
609 vha->qla_stats.input_requests++;
612 cur_seg = scsi_sglist(cmd);
613 ctx = GET_CMD_CTX_SP(sp);
616 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
617 QLA_DSDS_PER_IOCB : tot_dsds;
618 tot_dsds -= avail_dsds;
619 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
621 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
622 struct dsd_dma, list);
623 next_dsd = dsd_ptr->dsd_addr;
624 list_del(&dsd_ptr->list);
626 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
632 put_unaligned_le64(dsd_ptr->dsd_list_dma,
633 &cmd_pkt->fcp_dsd.address);
634 cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
636 put_unaligned_le64(dsd_ptr->dsd_list_dma,
638 cur_dsd->length = cpu_to_le32(dsd_list_len);
643 append_dsd64(&cur_dsd, cur_seg);
644 cur_seg = sg_next(cur_seg);
649 /* Null termination */
650 cur_dsd->address = 0;
653 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
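/*
 * Sketch of the resulting chain (assumption: the "+ 1" entry in
 * dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE reserves the slot used
 * to link lists): the Command Type 6 IOCB's fcp_dsd points at the first
 * DSD list, and the final 12-byte entry of each list either points at
 * the next list or carries the null termination written above:
 *
 *	fcp_dsd --> [ dsd0 .. dsdN | link ] --> [ dsd0 .. | 0 ]
 */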
658 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
659 * for Command Type 6.
661 * @dsds: number of data segment descriptors needed
663 * Returns the number of dsd list needed to store @dsds.
665 static inline uint16_t
666 qla24xx_calc_dsd_lists(uint16_t dsds)
668 uint16_t dsd_lists = 0;
670 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
671 if (dsds % QLA_DSDS_PER_IOCB)
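/*
 * Worked example (assuming the driver's QLA_DSDS_PER_IOCB value of 37):
 * dsds = 100 -> 100 / 37 = 2 full lists plus a 26-entry remainder, so
 * the function returns 3 (the final increment and return are elided in
 * this listing).
 */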
678 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
681 * @sp: SRB command to process
682 * @cmd_pkt: Command type 7 IOCB
683 * @tot_dsds: Total number of segments to transfer
684 * @req: pointer to request queue
687 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
688 uint16_t tot_dsds, struct req_que *req)
691 struct dsd64 *cur_dsd;
692 scsi_qla_host_t *vha;
693 struct scsi_cmnd *cmd;
694 struct scatterlist *sg;
697 cmd = GET_CMD_SP(sp);
699 /* Update entry type to indicate Command Type 7 IOCB */
700 put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
702 /* No data transfer */
703 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
704 cmd_pkt->byte_count = cpu_to_le32(0);
710 /* Set transfer direction */
711 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
712 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
713 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
714 vha->qla_stats.output_requests++;
715 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
716 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
717 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
718 vha->qla_stats.input_requests++;
721 /* One DSD is available in the Command Type 7 IOCB */
723 cur_dsd = &cmd_pkt->dsd;
725 /* Load data segments */
727 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
728 cont_a64_entry_t *cont_pkt;
730 /* Allocate additional continuation packets? */
731 if (avail_dsds == 0) {
733 * Five DSDs are available in the Continuation
736 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
737 cur_dsd = cont_pkt->dsd;
738 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
741 append_dsd64(&cur_dsd, sg);
746 struct fw_dif_context {
749 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
750 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
754 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
758 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
759 unsigned int protcnt)
761 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
763 switch (scsi_get_prot_type(cmd)) {
764 case SCSI_PROT_DIF_TYPE0:
766 * No check for ql2xenablehba_err_chk, as it would be an
767 * I/O error if HBA tag generation is not done.
769 pkt->ref_tag = cpu_to_le32((uint32_t)
770 (0xffffffff & scsi_get_lba(cmd)));
772 if (!qla2x00_hba_err_chk_enabled(sp))
775 pkt->ref_tag_mask[0] = 0xff;
776 pkt->ref_tag_mask[1] = 0xff;
777 pkt->ref_tag_mask[2] = 0xff;
778 pkt->ref_tag_mask[3] = 0xff;
782 * For Type 2 protection: a 16 bit GUARD tag, and a 32 bit REF tag
783 * that must match the LBA in the CDB plus the block offset N
785 case SCSI_PROT_DIF_TYPE2:
786 pkt->app_tag = cpu_to_le16(0);
787 pkt->app_tag_mask[0] = 0x0;
788 pkt->app_tag_mask[1] = 0x0;
790 pkt->ref_tag = cpu_to_le32((uint32_t)
791 (0xffffffff & scsi_get_lba(cmd)));
793 if (!qla2x00_hba_err_chk_enabled(sp))
796 /* enable ALL bytes of the ref tag */
797 pkt->ref_tag_mask[0] = 0xff;
798 pkt->ref_tag_mask[1] = 0xff;
799 pkt->ref_tag_mask[2] = 0xff;
800 pkt->ref_tag_mask[3] = 0xff;
803 /* For Type 3 protection: 16 bit GUARD only */
804 case SCSI_PROT_DIF_TYPE3:
805 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
806 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
811 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
814 case SCSI_PROT_DIF_TYPE1:
815 pkt->ref_tag = cpu_to_le32((uint32_t)
816 (0xffffffff & scsi_get_lba(cmd)));
817 pkt->app_tag = cpu_to_le16(0);
818 pkt->app_tag_mask[0] = 0x0;
819 pkt->app_tag_mask[1] = 0x0;
821 if (!qla2x00_hba_err_chk_enabled(sp))
824 /* enable ALL bytes of the ref tag */
825 pkt->ref_tag_mask[0] = 0xff;
826 pkt->ref_tag_mask[1] = 0xff;
827 pkt->ref_tag_mask[2] = 0xff;
828 pkt->ref_tag_mask[3] = 0xff;
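/*
 * Worked example: a Type 1 read of LBA 0x1234 with HBA error checking
 * enabled yields ref_tag = 0x1234 (the low 32 bits of the LBA),
 * app_tag = 0 with a zero app-tag mask (not validated), and all four
 * ref_tag_mask bytes set to 0xff so the firmware checks the entire
 * 32-bit REF tag.
 */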
834 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
837 struct scatterlist *sg;
838 uint32_t cumulative_partial, sg_len;
839 dma_addr_t sg_dma_addr;
841 if (sgx->num_bytes == sgx->tot_bytes)
845 cumulative_partial = sgx->tot_partial;
847 sg_dma_addr = sg_dma_address(sg);
848 sg_len = sg_dma_len(sg);
850 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
852 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
853 sgx->dma_len = (blk_sz - cumulative_partial);
854 sgx->tot_partial = 0;
855 sgx->num_bytes += blk_sz;
858 sgx->dma_len = sg_len - sgx->bytes_consumed;
859 sgx->tot_partial += sgx->dma_len;
863 sgx->bytes_consumed += sgx->dma_len;
865 if (sg_len == sgx->bytes_consumed) {
869 sgx->bytes_consumed = 0;
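/*
 * Worked example, blk_sz = 512 with two SG elements of 768 and 256
 * bytes: call 1 yields a 512-byte chunk from element 0; call 2 yields
 * the 256-byte tail (a partial block, carried in tot_partial) and
 * advances to element 1; call 3 yields that element's 256 bytes,
 * completing the second block.  A protection interval that straddles
 * an SG boundary is therefore emitted as multiple DMA chunks.
 */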
876 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
877 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
880 uint8_t avail_dsds = 0;
881 uint32_t dsd_list_len;
882 struct dsd_dma *dsd_ptr;
883 struct scatterlist *sg_prot;
884 struct dsd64 *cur_dsd = dsd;
885 uint16_t used_dsds = tot_dsds;
886 uint32_t prot_int; /* protection interval */
890 uint32_t sle_dma_len, tot_prot_dma_len = 0;
891 struct scsi_cmnd *cmd;
893 memset(&sgx, 0, sizeof(struct qla2_sgx));
895 cmd = GET_CMD_SP(sp);
896 prot_int = cmd->device->sector_size;
898 sgx.tot_bytes = scsi_bufflen(cmd);
899 sgx.cur_sg = scsi_sglist(cmd);
902 sg_prot = scsi_prot_sglist(cmd);
904 prot_int = tc->blk_sz;
905 sgx.tot_bytes = tc->bufflen;
907 sg_prot = tc->prot_sg;
913 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
915 sle_dma = sgx.dma_addr;
916 sle_dma_len = sgx.dma_len;
918 /* Allocate additional continuation packets? */
919 if (avail_dsds == 0) {
920 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
921 QLA_DSDS_PER_IOCB : used_dsds;
922 dsd_list_len = (avail_dsds + 1) * 12;
923 used_dsds -= avail_dsds;
925 /* allocate tracking DS */
926 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
930 /* allocate new list */
931 dsd_ptr->dsd_addr = next_dsd =
932 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
933 &dsd_ptr->dsd_list_dma);
937 * Need to clean up only this dsd_ptr; the rest
938 * will be done by sp_free_dma()
945 list_add_tail(&dsd_ptr->list,
946 &((struct crc_context *)
947 sp->u.scmd.ctx)->dsd_list);
949 sp->flags |= SRB_CRC_CTX_DSD_VALID;
951 list_add_tail(&dsd_ptr->list,
952 &(tc->ctx->dsd_list));
953 *tc->ctx_dsd_alloced = 1;
957 /* add new list to cmd iocb or last list */
958 put_unaligned_le64(dsd_ptr->dsd_list_dma,
960 cur_dsd->length = cpu_to_le32(dsd_list_len);
963 put_unaligned_le64(sle_dma, &cur_dsd->address);
964 cur_dsd->length = cpu_to_le32(sle_dma_len);
969 /* Got a full protection interval */
970 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
973 tot_prot_dma_len += sle_dma_len;
974 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
975 tot_prot_dma_len = 0;
976 sg_prot = sg_next(sg_prot);
979 partial = 1; /* So as to not re-enter this block */
983 /* Null termination */
984 cur_dsd->address = 0;
991 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
992 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
995 uint8_t avail_dsds = 0;
996 uint32_t dsd_list_len;
997 struct dsd_dma *dsd_ptr;
998 struct scatterlist *sg, *sgl;
999 struct dsd64 *cur_dsd = dsd;
1001 uint16_t used_dsds = tot_dsds;
1002 struct scsi_cmnd *cmd;
1005 cmd = GET_CMD_SP(sp);
1006 sgl = scsi_sglist(cmd);
1015 for_each_sg(sgl, sg, tot_dsds, i) {
1016 /* Allocate additional continuation packets? */
1017 if (avail_dsds == 0) {
1018 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1019 QLA_DSDS_PER_IOCB : used_dsds;
1020 dsd_list_len = (avail_dsds + 1) * 12;
1021 used_dsds -= avail_dsds;
1023 /* allocate tracking DS */
1024 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1028 /* allocate new list */
1029 dsd_ptr->dsd_addr = next_dsd =
1030 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1031 &dsd_ptr->dsd_list_dma);
1035 * Need to clean up only this dsd_ptr; the rest
1036 * will be done by sp_free_dma()
1043 list_add_tail(&dsd_ptr->list,
1044 &((struct crc_context *)
1045 sp->u.scmd.ctx)->dsd_list);
1047 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1049 list_add_tail(&dsd_ptr->list,
1050 &(tc->ctx->dsd_list));
1051 *tc->ctx_dsd_alloced = 1;
1054 /* add new list to cmd iocb or last list */
1055 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1057 cur_dsd->length = cpu_to_le32(dsd_list_len);
1060 append_dsd64(&cur_dsd, sg);
1064 /* Null termination */
1065 cur_dsd->address = 0;
1066 cur_dsd->length = 0;
1072 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1073 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1075 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1076 struct scatterlist *sg, *sgl;
1077 struct crc_context *difctx = NULL;
1078 struct scsi_qla_host *vha;
1080 uint avail_dsds = 0;
1081 uint used_dsds = tot_dsds;
1082 bool dif_local_dma_alloc = false;
1083 bool direction_to_device = false;
1087 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1089 sgl = scsi_prot_sglist(cmd);
1091 difctx = sp->u.scmd.ctx;
1092 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1093 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1094 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1095 __func__, cmd, difctx, sp);
1100 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1106 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1107 "%s: enter (write=%u)\n", __func__, direction_to_device);
1109 /* if the initiator is doing a write, or the target a read */
1110 if (direction_to_device) {
1111 for_each_sg(sgl, sg, tot_dsds, i) {
1112 u64 sle_phys = sg_phys(sg);
1114 /* If SGE addr + len flips bits in the upper 32 bits (4 GB boundary) */
1115 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1116 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1117 "%s: page boundary crossing (phys=%llx len=%x)\n",
1118 __func__, sle_phys, sg->length);
1121 ha->dif_bundle_crossed_pages++;
1122 dif_local_dma_alloc = true;
1124 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1126 "%s: difctx pointer is NULL\n",
1132 ha->dif_bundle_writes++;
1134 ha->dif_bundle_reads++;
1137 if (ql2xdifbundlinginternalbuffers)
1138 dif_local_dma_alloc = direction_to_device;
1140 if (dif_local_dma_alloc) {
1141 u32 track_difbundl_buf = 0;
1142 u32 ldma_sg_len = 0;
1145 difctx->no_dif_bundl = 0;
1146 difctx->dif_bundl_len = 0;
1148 /* Track DSD buffers */
1149 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1150 /* Track local DMA buffers */
1151 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1153 for_each_sg(sgl, sg, tot_dsds, i) {
1154 u32 sglen = sg_dma_len(sg);
1156 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1157 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1158 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1159 difctx->dif_bundl_len, ldma_needed);
1166 * Allocate list item to store
1169 dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1172 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1173 "%s: failed alloc dsd_ptr\n",
1177 ha->dif_bundle_kallocs++;
1179 /* allocate dma buffer */
1180 dsd_ptr->dsd_addr = dma_pool_alloc
1181 (ha->dif_bundl_pool, GFP_ATOMIC,
1182 &dsd_ptr->dsd_list_dma);
1183 if (!dsd_ptr->dsd_addr) {
1184 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1185 "%s: failed alloc ->dsd_ptr\n",
1188 * need to clean up only this
1189 * dsd_ptr; the rest will be done
1193 ha->dif_bundle_kallocs--;
1196 ha->dif_bundle_dma_allocs++;
1198 difctx->no_dif_bundl++;
1199 list_add_tail(&dsd_ptr->list,
1200 &difctx->ldif_dma_hndl_list);
1203 /* xfrlen is min of dma pool size and sglen */
1205 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1206 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1209 /* replace with local allocated dma buffer */
1210 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1211 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1212 difctx->dif_bundl_len);
1213 difctx->dif_bundl_len += xfrlen;
1215 ldma_sg_len += xfrlen;
1216 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1224 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1225 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1226 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1227 difctx->dif_bundl_len, difctx->no_dif_bundl,
1228 track_difbundl_buf);
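/*
 * Worked example (assuming 1024-byte dif_bundl_pool buffers): a
 * 2400-byte protection stream is copied into three local DMA buffers
 * (1024 + 1024 + 352), so no_dif_bundl = 3 and dif_bundl_len = 2400;
 * those two counters drive the DSD-list build just below.
 */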
1231 sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1233 tc->prot_flags = DIF_BUNDL_DMA_VALID;
1235 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1236 &difctx->ldif_dma_hndl_list, list) {
1237 u32 sglen = (difctx->dif_bundl_len >
1238 DIF_BUNDLING_DMA_POOL_SIZE) ?
1239 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1241 BUG_ON(track_difbundl_buf == 0);
1243 /* Allocate additional continuation packets? */
1244 if (avail_dsds == 0) {
1245 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1247 "%s: adding continuation iocb's\n",
1249 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1250 QLA_DSDS_PER_IOCB : used_dsds;
1251 dsd_list_len = (avail_dsds + 1) * 12;
1252 used_dsds -= avail_dsds;
1254 /* allocate tracking DS */
1255 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1257 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1258 "%s: failed alloc dsd_ptr\n",
1262 ha->dif_bundle_kallocs++;
1264 difctx->no_ldif_dsd++;
1265 /* allocate new list */
1267 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1268 &dsd_ptr->dsd_list_dma);
1269 if (!dsd_ptr->dsd_addr) {
1270 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1271 "%s: failed alloc ->dsd_addr\n",
1274 * need to clean up only this dsd_ptr;
1275 * the rest will be done by sp_free_dma()
1278 ha->dif_bundle_kallocs--;
1281 ha->dif_bundle_dma_allocs++;
1284 list_add_tail(&dsd_ptr->list,
1285 &difctx->ldif_dsd_list);
1286 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1288 list_add_tail(&dsd_ptr->list,
1289 &difctx->ldif_dsd_list);
1290 tc->ctx_dsd_alloced = 1;
1293 /* add new list to cmd iocb or last list */
1294 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1296 cur_dsd->length = cpu_to_le32(dsd_list_len);
1297 cur_dsd = dsd_ptr->dsd_addr;
1299 put_unaligned_le64(dif_dsd->dsd_list_dma,
1301 cur_dsd->length = cpu_to_le32(sglen);
1304 difctx->dif_bundl_len -= sglen;
1305 track_difbundl_buf--;
1308 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1309 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1310 difctx->no_ldif_dsd, difctx->no_dif_bundl);
1312 for_each_sg(sgl, sg, tot_dsds, i) {
1313 /* Allocate additional continuation packets? */
1314 if (avail_dsds == 0) {
1315 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1316 QLA_DSDS_PER_IOCB : used_dsds;
1317 dsd_list_len = (avail_dsds + 1) * 12;
1318 used_dsds -= avail_dsds;
1320 /* allocate tracking DS */
1321 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1323 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1325 "%s: failed alloc dsd_dma...\n",
1330 /* allocate new list */
1332 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1333 &dsd_ptr->dsd_list_dma);
1334 if (!dsd_ptr->dsd_addr) {
1335 /* need to clean up only this dsd_ptr */
1336 /* rest will be done by sp_free_dma() */
1342 list_add_tail(&dsd_ptr->list,
1344 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1346 list_add_tail(&dsd_ptr->list,
1348 tc->ctx_dsd_alloced = 1;
1351 /* add new list to cmd iocb or last list */
1352 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1354 cur_dsd->length = cpu_to_le32(dsd_list_len);
1355 cur_dsd = dsd_ptr->dsd_addr;
1357 append_dsd64(&cur_dsd, sg);
1361 /* Null termination */
1362 cur_dsd->address = 0;
1363 cur_dsd->length = 0;
1369 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1370 * Type 6 IOCB types.
1372 * @sp: SRB command to process
1373 * @cmd_pkt: Command type CRC_2 IOCB
1374 * @tot_dsds: Total number of segments to transfer
1375 * @tot_prot_dsds: Total number of segments with protection information
1376 * @fw_prot_opts: Protection options to be passed to firmware
1379 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1380 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1382 struct dsd64 *cur_dsd;
1384 scsi_qla_host_t *vha;
1385 struct scsi_cmnd *cmd;
1386 uint32_t total_bytes = 0;
1387 uint32_t data_bytes;
1389 uint8_t bundling = 1;
1391 struct crc_context *crc_ctx_pkt = NULL;
1392 struct qla_hw_data *ha;
1393 uint8_t additional_fcpcdb_len;
1394 uint16_t fcp_cmnd_len;
1395 struct fcp_cmnd *fcp_cmnd;
1396 dma_addr_t crc_ctx_dma;
1398 cmd = GET_CMD_SP(sp);
1400 /* Update entry type to indicate Command Type CRC_2 IOCB */
1401 put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1406 /* No data transfer */
1407 data_bytes = scsi_bufflen(cmd);
1408 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1409 cmd_pkt->byte_count = cpu_to_le32(0);
1413 cmd_pkt->vp_index = sp->vha->vp_idx;
1415 /* Set transfer direction */
1416 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1417 cmd_pkt->control_flags =
1418 cpu_to_le16(CF_WRITE_DATA);
1419 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1420 cmd_pkt->control_flags =
1421 cpu_to_le16(CF_READ_DATA);
1424 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1425 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1426 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1427 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1430 /* Allocate CRC context from global pool */
1431 crc_ctx_pkt = sp->u.scmd.ctx =
1432 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1435 goto crc_queuing_error;
1437 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1439 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1442 crc_ctx_pkt->handle = cmd_pkt->handle;
1444 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1446 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1447 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1449 put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1450 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1452 /* Determine SCSI command length -- align to 4 byte boundary */
1453 if (cmd->cmd_len > 16) {
1454 additional_fcpcdb_len = cmd->cmd_len - 16;
1455 if ((cmd->cmd_len % 4) != 0) {
1456 /* SCSI cmd > 16 bytes must be multiple of 4 */
1457 goto crc_queuing_error;
1459 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1461 additional_fcpcdb_len = 0;
1462 fcp_cmnd_len = 12 + 16 + 4;
1465 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1467 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1468 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1469 fcp_cmnd->additional_cdb_len |= 1;
1470 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1471 fcp_cmnd->additional_cdb_len |= 2;
1473 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1474 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1475 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1476 put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1477 &cmd_pkt->fcp_cmnd_dseg_address);
1478 fcp_cmnd->task_management = 0;
1479 fcp_cmnd->task_attribute = TSK_SIMPLE;
1481 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1483 /* Compute DIF length and adjust data length to include protection */
1485 blk_size = cmd->device->sector_size;
1486 dif_bytes = (data_bytes / blk_size) * 8;
1488 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1489 case SCSI_PROT_READ_INSERT:
1490 case SCSI_PROT_WRITE_STRIP:
1491 total_bytes = data_bytes;
1492 data_bytes += dif_bytes;
1495 case SCSI_PROT_READ_STRIP:
1496 case SCSI_PROT_WRITE_INSERT:
1497 case SCSI_PROT_READ_PASS:
1498 case SCSI_PROT_WRITE_PASS:
1499 total_bytes = data_bytes + dif_bytes;
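/*
 * Worked example: a 4096-byte I/O on 512-byte sectors carries eight
 * 8-byte protection tuples, so dif_bytes = (4096 / 512) * 8 = 64.  For
 * the PASS cases the wire count becomes total_bytes = 4096 + 64 = 4160,
 * which is the value later stored through fcp_dl.
 */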
1505 if (!qla2x00_hba_err_chk_enabled(sp))
1506 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1507 /* HBA error checking enabled */
1508 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1509 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1510 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1511 SCSI_PROT_DIF_TYPE2))
1512 fw_prot_opts |= BIT_10;
1513 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1514 SCSI_PROT_DIF_TYPE3)
1515 fw_prot_opts |= BIT_11;
1519 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1522 * Configure bundling if we need to fetch interleaving
1523 * protection PCI accesses
1525 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1526 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1527 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1529 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1532 /* Finish the common fields of CRC pkt */
1533 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1534 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1535 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1536 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1537 /* Fibre channel byte count */
1538 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1539 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1540 additional_fcpcdb_len);
1541 *fcp_dl = htonl(total_bytes);
1543 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1544 cmd_pkt->byte_count = cpu_to_le32(0);
1547 /* Walks data segments */
1549 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1551 if (!bundling && tot_prot_dsds) {
1552 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1553 cur_dsd, tot_dsds, NULL))
1554 goto crc_queuing_error;
1555 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1556 (tot_dsds - tot_prot_dsds), NULL))
1557 goto crc_queuing_error;
1559 if (bundling && tot_prot_dsds) {
1560 /* Walks dif segments */
1561 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1562 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1563 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1564 tot_prot_dsds, NULL))
1565 goto crc_queuing_error;
1570 /* Cleanup will be performed by the caller */
1572 return QLA_FUNCTION_FAILED;
1576 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1577 * @sp: command to send to the ISP
1579 * Returns non-zero if a failure occurred, else zero.
1582 qla24xx_start_scsi(srb_t *sp)
1585 unsigned long flags;
1589 struct cmd_type_7 *cmd_pkt;
1593 struct req_que *req = NULL;
1594 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1595 struct scsi_qla_host *vha = sp->vha;
1596 struct qla_hw_data *ha = vha->hw;
1598 /* Setup device pointers. */
1601 /* So we know we haven't pci_map'ed anything yet */
1604 /* Send marker if required */
1605 if (vha->marker_needed != 0) {
1606 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1608 return QLA_FUNCTION_FAILED;
1609 vha->marker_needed = 0;
1612 /* Acquire ring specific lock */
1613 spin_lock_irqsave(&ha->hardware_lock, flags);
1615 /* Check for room in outstanding command list. */
1616 handle = req->current_outstanding_cmd;
1617 for (index = 1; index < req->num_outstanding_cmds; index++) {
1619 if (handle == req->num_outstanding_cmds)
1621 if (!req->outstanding_cmds[handle])
1624 if (index == req->num_outstanding_cmds)
1627 /* Map the sg table so we have an accurate count of sg entries needed */
1628 if (scsi_sg_count(cmd)) {
1629 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1630 scsi_sg_count(cmd), cmd->sc_data_direction);
1631 if (unlikely(!nseg))
1637 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1638 if (req->cnt < (req_cnt + 2)) {
1639 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1640 RD_REG_DWORD_RELAXED(req->req_q_out);
1641 if (req->ring_index < cnt)
1642 req->cnt = cnt - req->ring_index;
1644 req->cnt = req->length -
1645 (req->ring_index - cnt);
1646 if (req->cnt < (req_cnt + 2))
1650 /* Build command packet. */
1651 req->current_outstanding_cmd = handle;
1652 req->outstanding_cmds[handle] = sp;
1653 sp->handle = handle;
1654 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1655 req->cnt -= req_cnt;
1657 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1658 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1660 /* Zero out remaining portion of packet. */
1661 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1662 clr_ptr = (uint32_t *)cmd_pkt + 2;
1663 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1664 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1666 /* Set NPORT-ID and LUN number */
1667 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1668 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1669 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1670 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1671 cmd_pkt->vp_index = sp->vha->vp_idx;
1673 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1674 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1676 cmd_pkt->task = TSK_SIMPLE;
1678 /* Load SCSI command packet. */
1679 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1680 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1682 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1684 /* Build IOCB segments */
1685 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1687 /* Set total data segment count. */
1688 cmd_pkt->entry_count = (uint8_t)req_cnt;
1690 /* Adjust ring index. */
1692 if (req->ring_index == req->length) {
1693 req->ring_index = 0;
1694 req->ring_ptr = req->ring;
1698 sp->flags |= SRB_DMA_VALID;
1700 /* Set chip new ring index. */
1701 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1708 scsi_dma_unmap(cmd);
1710 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1712 return QLA_FUNCTION_FAILED;
1716 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717 * @sp: command to send to the ISP
1719 * Returns non-zero if a failure occurred, else zero.
1722 qla24xx_dif_start_scsi(srb_t *sp)
1725 unsigned long flags;
1730 uint16_t req_cnt = 0;
1732 uint16_t tot_prot_dsds;
1733 uint16_t fw_prot_opts = 0;
1734 struct req_que *req = NULL;
1735 struct rsp_que *rsp = NULL;
1736 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1737 struct scsi_qla_host *vha = sp->vha;
1738 struct qla_hw_data *ha = vha->hw;
1739 struct cmd_type_crc_2 *cmd_pkt;
1740 uint32_t status = 0;
1742 #define QDSS_GOT_Q_SPACE BIT_0
1744 /* Only process protection I/O or CDBs longer than 16 bytes in this routine */
1745 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1746 if (cmd->cmd_len <= 16)
1747 return qla24xx_start_scsi(sp);
1750 /* Setup device pointers. */
1754 /* So we know we haven't pci_map'ed anything yet */
1757 /* Send marker if required */
1758 if (vha->marker_needed != 0) {
1759 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1761 return QLA_FUNCTION_FAILED;
1762 vha->marker_needed = 0;
1765 /* Acquire ring specific lock */
1766 spin_lock_irqsave(&ha->hardware_lock, flags);
1768 /* Check for room in outstanding command list. */
1769 handle = req->current_outstanding_cmd;
1770 for (index = 1; index < req->num_outstanding_cmds; index++) {
1772 if (handle == req->num_outstanding_cmds)
1774 if (!req->outstanding_cmds[handle])
1778 if (index == req->num_outstanding_cmds)
1781 /* Compute number of required data segments */
1782 /* Map the sg table so we have an accurate count of sg entries needed */
1783 if (scsi_sg_count(cmd)) {
1784 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1785 scsi_sg_count(cmd), cmd->sc_data_direction);
1786 if (unlikely(!nseg))
1789 sp->flags |= SRB_DMA_VALID;
1791 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1792 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1793 struct qla2_sgx sgx;
1796 memset(&sgx, 0, sizeof(struct qla2_sgx));
1797 sgx.tot_bytes = scsi_bufflen(cmd);
1798 sgx.cur_sg = scsi_sglist(cmd);
1802 while (qla24xx_get_one_block_sg(
1803 cmd->device->sector_size, &sgx, &partial))
1809 /* number of required data segments */
1812 /* Compute number of required protection segments */
1813 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1814 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1815 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1816 if (unlikely(!nseg))
1819 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1821 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1822 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1823 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1830 /* Total Data and protection sg segment(s) */
1831 tot_prot_dsds = nseg;
1833 if (req->cnt < (req_cnt + 2)) {
1834 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1835 RD_REG_DWORD_RELAXED(req->req_q_out);
1836 if (req->ring_index < cnt)
1837 req->cnt = cnt - req->ring_index;
1839 req->cnt = req->length -
1840 (req->ring_index - cnt);
1841 if (req->cnt < (req_cnt + 2))
1845 status |= QDSS_GOT_Q_SPACE;
1847 /* Build header part of command packet (excluding the OPCODE). */
1848 req->current_outstanding_cmd = handle;
1849 req->outstanding_cmds[handle] = sp;
1850 sp->handle = handle;
1851 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1852 req->cnt -= req_cnt;
1854 /* Fill-in common area */
1855 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1856 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1858 clr_ptr = (uint32_t *)cmd_pkt + 2;
1859 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1861 /* Set NPORT-ID and LUN number */
1862 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1863 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1864 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1865 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1867 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1868 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1870 /* Total Data and protection segment(s) */
1871 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1873 /* Build IOCB segments and adjust for data protection segments */
1874 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1875 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1879 cmd_pkt->entry_count = (uint8_t)req_cnt;
1880 /* Specify response queue number where completion should happen */
1881 cmd_pkt->entry_status = (uint8_t) rsp->id;
1882 cmd_pkt->timeout = cpu_to_le16(0);
1885 /* Adjust ring index. */
1887 if (req->ring_index == req->length) {
1888 req->ring_index = 0;
1889 req->ring_ptr = req->ring;
1893 /* Set chip new ring index. */
1894 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1901 if (status & QDSS_GOT_Q_SPACE) {
1902 req->outstanding_cmds[handle] = NULL;
1903 req->cnt += req_cnt;
1905 /* Cleanup will be performed by the caller (queuecommand) */
1907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1908 return QLA_FUNCTION_FAILED;
1912 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1913 * @sp: command to send to the ISP
1915 * Returns non-zero if a failure occurred, else zero.
1918 qla2xxx_start_scsi_mq(srb_t *sp)
1921 unsigned long flags;
1925 struct cmd_type_7 *cmd_pkt;
1929 struct req_que *req = NULL;
1930 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1931 struct scsi_qla_host *vha = sp->fcport->vha;
1932 struct qla_hw_data *ha = vha->hw;
1933 struct qla_qpair *qpair = sp->qpair;
1935 /* Acquire qpair specific lock */
1936 spin_lock_irqsave(&qpair->qp_lock, flags);
1938 /* Setup qpair pointers */
1941 /* So we know we haven't pci_map'ed anything yet */
1944 /* Send marker if required */
1945 if (vha->marker_needed != 0) {
1946 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1948 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1949 return QLA_FUNCTION_FAILED;
1951 vha->marker_needed = 0;
1954 /* Check for room in outstanding command list. */
1955 handle = req->current_outstanding_cmd;
1956 for (index = 1; index < req->num_outstanding_cmds; index++) {
1958 if (handle == req->num_outstanding_cmds)
1960 if (!req->outstanding_cmds[handle])
1963 if (index == req->num_outstanding_cmds)
1966 /* Map the sg table so we have an accurate count of sg entries needed */
1967 if (scsi_sg_count(cmd)) {
1968 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1969 scsi_sg_count(cmd), cmd->sc_data_direction);
1970 if (unlikely(!nseg))
1976 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1977 if (req->cnt < (req_cnt + 2)) {
1978 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1979 RD_REG_DWORD_RELAXED(req->req_q_out);
1980 if (req->ring_index < cnt)
1981 req->cnt = cnt - req->ring_index;
1983 req->cnt = req->length -
1984 (req->ring_index - cnt);
1985 if (req->cnt < (req_cnt + 2))
1989 /* Build command packet. */
1990 req->current_outstanding_cmd = handle;
1991 req->outstanding_cmds[handle] = sp;
1992 sp->handle = handle;
1993 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1994 req->cnt -= req_cnt;
1996 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1997 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1999 /* Zero out remaining portion of packet. */
2000 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2001 clr_ptr = (uint32_t *)cmd_pkt + 2;
2002 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2003 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2005 /* Set NPORT-ID and LUN number */
2006 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2007 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2008 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2009 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2010 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2012 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2013 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2015 cmd_pkt->task = TSK_SIMPLE;
2017 /* Load SCSI command packet. */
2018 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2019 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2021 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2023 /* Build IOCB segments */
2024 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2026 /* Set total data segment count. */
2027 cmd_pkt->entry_count = (uint8_t)req_cnt;
2029 /* Adjust ring index. */
2031 if (req->ring_index == req->length) {
2032 req->ring_index = 0;
2033 req->ring_ptr = req->ring;
2037 sp->flags |= SRB_DMA_VALID;
2039 /* Set chip new ring index. */
2040 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2042 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2047 scsi_dma_unmap(cmd);
2049 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2051 return QLA_FUNCTION_FAILED;
2056 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2057 * @sp: command to send to the ISP
2059 * Returns non-zero if a failure occurred, else zero.
2062 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2065 unsigned long flags;
2070 uint16_t req_cnt = 0;
2072 uint16_t tot_prot_dsds;
2073 uint16_t fw_prot_opts = 0;
2074 struct req_que *req = NULL;
2075 struct rsp_que *rsp = NULL;
2076 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2077 struct scsi_qla_host *vha = sp->fcport->vha;
2078 struct qla_hw_data *ha = vha->hw;
2079 struct cmd_type_crc_2 *cmd_pkt;
2080 uint32_t status = 0;
2081 struct qla_qpair *qpair = sp->qpair;
2083 #define QDSS_GOT_Q_SPACE BIT_0
2085 /* Check for host side state */
2086 if (!qpair->online) {
2087 cmd->result = DID_NO_CONNECT << 16;
2088 return QLA_INTERFACE_ERROR;
2091 if (!qpair->difdix_supported &&
2092 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2093 cmd->result = DID_NO_CONNECT << 16;
2094 return QLA_INTERFACE_ERROR;
2097 /* Only process protection I/O or CDBs longer than 16 bytes in this routine */
2098 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2099 if (cmd->cmd_len <= 16)
2100 return qla2xxx_start_scsi_mq(sp);
2103 spin_lock_irqsave(&qpair->qp_lock, flags);
2105 /* Setup qpair pointers */
2109 /* So we know we haven't pci_map'ed anything yet */
2112 /* Send marker if required */
2113 if (vha->marker_needed != 0) {
2114 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2116 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2117 return QLA_FUNCTION_FAILED;
2119 vha->marker_needed = 0;
2122 /* Check for room in outstanding command list. */
2123 handle = req->current_outstanding_cmd;
2124 for (index = 1; index < req->num_outstanding_cmds; index++) {
2126 if (handle == req->num_outstanding_cmds)
2128 if (!req->outstanding_cmds[handle])
2132 if (index == req->num_outstanding_cmds)
2135 /* Compute number of required data segments */
2136 /* Map the sg table so we have an accurate count of sg entries needed */
2137 if (scsi_sg_count(cmd)) {
2138 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2139 scsi_sg_count(cmd), cmd->sc_data_direction);
2140 if (unlikely(!nseg))
2143 sp->flags |= SRB_DMA_VALID;
2145 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2146 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2147 struct qla2_sgx sgx;
2150 memset(&sgx, 0, sizeof(struct qla2_sgx));
2151 sgx.tot_bytes = scsi_bufflen(cmd);
2152 sgx.cur_sg = scsi_sglist(cmd);
2156 while (qla24xx_get_one_block_sg(
2157 cmd->device->sector_size, &sgx, &partial))
2163 /* number of required data segments */
2166 /* Compute number of required protection segments */
2167 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2168 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2169 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2170 if (unlikely(!nseg))
2173 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2175 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2176 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2177 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2184 /* Total Data and protection sg segment(s) */
2185 tot_prot_dsds = nseg;
2187 if (req->cnt < (req_cnt + 2)) {
2188 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2189 RD_REG_DWORD_RELAXED(req->req_q_out);
2190 if (req->ring_index < cnt)
2191 req->cnt = cnt - req->ring_index;
2193 req->cnt = req->length -
2194 (req->ring_index - cnt);
2195 if (req->cnt < (req_cnt + 2))
2199 status |= QDSS_GOT_Q_SPACE;
2201 /* Build header part of command packet (excluding the OPCODE). */
2202 req->current_outstanding_cmd = handle;
2203 req->outstanding_cmds[handle] = sp;
2204 sp->handle = handle;
2205 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2206 req->cnt -= req_cnt;
2208 /* Fill-in common area */
2209 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2210 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2212 clr_ptr = (uint32_t *)cmd_pkt + 2;
2213 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2215 /* Set NPORT-ID and LUN number */
2216 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2217 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2218 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2219 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2221 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2222 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2224 /* Total Data and protection segment(s) */
2225 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2227 /* Build IOCB segments and adjust for data protection segments */
2228 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2229 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2233 cmd_pkt->entry_count = (uint8_t)req_cnt;
2234 cmd_pkt->timeout = cpu_to_le16(0);
2237 /* Adjust ring index. */
2239 if (req->ring_index == req->length) {
2240 req->ring_index = 0;
2241 req->ring_ptr = req->ring;
2245 /* Set chip new ring index. */
2246 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2248 /* Manage unprocessed RIO/ZIO commands in response queue. */
2249 if (vha->flags.process_response_queue &&
2250 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2251 qla24xx_process_response_queue(vha, rsp);
2253 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2258 if (status & QDSS_GOT_Q_SPACE) {
2259 req->outstanding_cmds[handle] = NULL;
2260 req->cnt += req_cnt;
2262 /* Cleanup will be performed by the caller (queuecommand) */
2264 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2265 return QLA_FUNCTION_FAILED;
2268 /* Generic Control-SRB manipulation functions. */
2270 /* hardware_lock assumed to be held. */
2273 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2275 scsi_qla_host_t *vha = qpair->vha;
2276 struct qla_hw_data *ha = vha->hw;
2277 struct req_que *req = qpair->req;
2278 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2279 uint32_t index, handle;
2281 uint16_t cnt, req_cnt;
2287 if (sp && (sp->type != SRB_SCSI_CMD)) {
2288 /* Adjust entry-counts as needed. */
2289 req_cnt = sp->iocbs;
2292 /* Check for room on request queue. */
2293 if (req->cnt < req_cnt + 2) {
2294 if (qpair->use_shadow_reg)
2295 cnt = *req->out_ptr;
2296 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2298 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2299 else if (IS_P3P_TYPE(ha))
2300 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2301 else if (IS_FWI2_CAPABLE(ha))
2302 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2303 else if (IS_QLAFX00(ha))
2304 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2306 cnt = qla2x00_debounce_register(
2307 ISP_REQ_Q_OUT(ha, &reg->isp));
2309 if (req->ring_index < cnt)
2310 req->cnt = cnt - req->ring_index;
2312 req->cnt = req->length -
2313 (req->ring_index - cnt);
2315 if (req->cnt < req_cnt + 2)
2319 /* Check for room in outstanding command list. */
2320 handle = req->current_outstanding_cmd;
2321 for (index = 1; index < req->num_outstanding_cmds; index++) {
2323 if (handle == req->num_outstanding_cmds)
2325 if (!req->outstanding_cmds[handle])
2328 if (index == req->num_outstanding_cmds) {
2329 ql_log(ql_log_warn, vha, 0x700b,
2330 "No room on outstanding cmd array.\n");
2334 /* Prep command array. */
2335 req->current_outstanding_cmd = handle;
2336 req->outstanding_cmds[handle] = sp;
2337 sp->handle = handle;
2341 req->cnt -= req_cnt;
2342 pkt = req->ring_ptr;
2343 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2344 if (IS_QLAFX00(ha)) {
2345 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2346 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2348 pkt->entry_count = req_cnt;
2349 pkt->handle = handle;
2355 qpair->tgt_counters.num_alloc_iocb_failed++;
2360 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2362 scsi_qla_host_t *vha = qpair->vha;
2364 if (qla2x00_reset_active(vha))
2367 return __qla2x00_alloc_iocbs(qpair, sp);
2371 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2373 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
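/*
 * Usage sketch (hypothetical caller, following the pattern of the
 * IOCB-build helpers below): allocate a ring slot, fill in the wire
 * format, then ring the doorbell via qla2x00_start_iocbs():
 *
 *	struct logio_entry_24xx *logio;
 *
 *	logio = (struct logio_entry_24xx *)qla2x00_alloc_iocbs(vha, sp);
 *	if (!logio)
 *		return QLA_FUNCTION_FAILED;
 *	qla24xx_login_iocb(sp, logio);
 *	qla2x00_start_iocbs(vha, vha->req);
 */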
2377 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2379 struct srb_iocb *lio = &sp->u.iocb_cmd;
2381 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2382 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2383 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2384 logio->control_flags |= LCF_NVME_PRLI;
2385 if (sp->vha->flags.nvme_first_burst)
2386 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2389 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2390 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2391 logio->port_id[1] = sp->fcport->d_id.b.area;
2392 logio->port_id[2] = sp->fcport->d_id.b.domain;
2393 logio->vp_index = sp->vha->vp_idx;
2397 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2399 struct srb_iocb *lio = &sp->u.iocb_cmd;
2401 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2402 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2403 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2404 } else {
2405 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2406 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2407 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2408 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2409 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2410 }
2411 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2412 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2413 logio->port_id[1] = sp->fcport->d_id.b.area;
2414 logio->port_id[2] = sp->fcport->d_id.b.domain;
2415 logio->vp_index = sp->vha->vp_idx;
2419 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2421 struct qla_hw_data *ha = sp->vha->hw;
2422 struct srb_iocb *lio = &sp->u.iocb_cmd;
2423 uint16_t opts;
2425 mbx->entry_type = MBX_IOCB_TYPE;
2426 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2427 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2428 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2429 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
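/* With extended loop IDs the ID and the option bits travel in
 * separate mailbox registers (mb1/mb10); legacy parts pack both
 * into mb1, loop ID in the high byte and options in the low byte.
 */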
2430 if (HAS_EXTENDED_IDS(ha)) {
2431 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2432 mbx->mb10 = cpu_to_le16(opts);
2433 } else {
2434 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2435 }
2436 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2437 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2438 sp->fcport->d_id.b.al_pa);
2439 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2443 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2445 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2446 logio->control_flags =
2447 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2448 if (!sp->fcport->keep_nport_handle)
2449 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2450 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2451 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2452 logio->port_id[1] = sp->fcport->d_id.b.area;
2453 logio->port_id[2] = sp->fcport->d_id.b.domain;
2454 logio->vp_index = sp->vha->vp_idx;
2458 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2460 struct qla_hw_data *ha = sp->vha->hw;
2462 mbx->entry_type = MBX_IOCB_TYPE;
2463 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2464 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2465 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2466 cpu_to_le16(sp->fcport->loop_id) :
2467 cpu_to_le16(sp->fcport->loop_id << 8);
2468 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2469 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2470 sp->fcport->d_id.b.al_pa);
2471 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2472 /* Implicit: mbx->mbx10 = 0. */
2476 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2478 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2479 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2480 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2481 logio->vp_index = sp->vha->vp_idx;
2485 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2487 struct qla_hw_data *ha = sp->vha->hw;
2489 mbx->entry_type = MBX_IOCB_TYPE;
2490 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2491 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2492 if (HAS_EXTENDED_IDS(ha)) {
2493 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2494 mbx->mb10 = cpu_to_le16(BIT_0);
2495 } else {
2496 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2497 }
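/* The 64-bit port-database DMA address is split across four 16-bit
 * mailbox registers: mb2/mb3 carry the low dword (MSW/LSW), mb6/mb7
 * the high dword.
 */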
2498 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2499 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2500 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2501 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2502 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2506 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2508 uint32_t flags;
2509 uint64_t lun;
2510 struct fc_port *fcport = sp->fcport;
2511 scsi_qla_host_t *vha = fcport->vha;
2512 struct qla_hw_data *ha = vha->hw;
2513 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2514 struct req_que *req = vha->req;
2516 flags = iocb->u.tmf.flags;
2517 lun = iocb->u.tmf.lun;
2519 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2520 tsk->entry_count = 1;
2521 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2522 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
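/* The firmware expects the task-management timeout as 2 * R_A_TOV
 * in seconds; r_a_tov is presumably kept in 100 ms units here,
 * hence the divide-by-10 before doubling.
 */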
2523 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2524 tsk->control_flags = cpu_to_le32(flags);
2525 tsk->port_id[0] = fcport->d_id.b.al_pa;
2526 tsk->port_id[1] = fcport->d_id.b.area;
2527 tsk->port_id[2] = fcport->d_id.b.domain;
2528 tsk->vp_index = fcport->vha->vp_idx;
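/* Only a LUN reset is addressed to a specific LUN; the 8-byte FCP
 * LUN is built host-endian by int_to_scsilun() and then swapped
 * into big-endian wire order in place.
 */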
2530 if (flags == TCF_LUN_RESET) {
2531 int_to_scsilun(lun, &tsk->lun);
2532 host_to_fcp_swap((uint8_t *)&tsk->lun,
2533 sizeof(tsk->lun));
2534 }
2537 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2539 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2540 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2541 sp->free = qla2x00_sp_free;
2542 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2543 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2544 sp->start_timer = 1;
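/* The timer is only primed here; qla2x00_start_sp() calls
 * add_timer() once the IOCB has actually been queued, so a command
 * that never reaches the ring is not timed.
 */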
2548 qla2x00_els_dcmd_sp_free(void *data)
2550 srb_t *sp = data;
2551 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2555 if (elsio->u.els_logo.els_logo_pyld)
2556 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2557 elsio->u.els_logo.els_logo_pyld,
2558 elsio->u.els_logo.els_logo_pyld_dma);
2560 del_timer(&elsio->timer);
2565 qla2x00_els_dcmd_iocb_timeout(void *data)
2567 srb_t *sp = data;
2568 fc_port_t *fcport = sp->fcport;
2569 struct scsi_qla_host *vha = sp->vha;
2570 struct srb_iocb *lio = &sp->u.iocb_cmd;
2572 ql_dbg(ql_dbg_io, vha, 0x3069,
2573 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2574 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2575 fcport->d_id.b.al_pa);
2577 complete(&lio->u.els_logo.comp);
2581 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2583 srb_t *sp = ptr;
2584 fc_port_t *fcport = sp->fcport;
2585 struct srb_iocb *lio = &sp->u.iocb_cmd;
2586 struct scsi_qla_host *vha = sp->vha;
2588 ql_dbg(ql_dbg_io, vha, 0x3072,
2589 "%s hdl=%x, portid=%02x%02x%02x done\n",
2590 sp->name, sp->handle, fcport->d_id.b.domain,
2591 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2593 complete(&lio->u.els_logo.comp);
2597 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2598 port_id_t remote_did)
2600 srb_t *sp;
2601 fc_port_t *fcport = NULL;
2602 struct srb_iocb *elsio = NULL;
2603 struct qla_hw_data *ha = vha->hw;
2604 struct els_logo_payload logo_pyld;
2605 int rval = QLA_SUCCESS;
2607 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2608 if (!fcport) {
2609 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2610 return -ENOMEM;
2611 }
2613 /* Alloc SRB structure */
2614 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2616 if (!sp) {
2617 ql_log(ql_log_info, vha, 0x70e6,
2618 "SRB allocation failed\n");
2622 elsio = &sp->u.iocb_cmd;
2623 fcport->loop_id = 0xFFFF;
2624 fcport->d_id.b.domain = remote_did.b.domain;
2625 fcport->d_id.b.area = remote_did.b.area;
2626 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2628 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2629 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2631 sp->type = SRB_ELS_DCMD;
2632 sp->name = "ELS_DCMD";
2633 sp->fcport = fcport;
2634 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2635 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2636 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2637 sp->done = qla2x00_els_dcmd_sp_done;
2638 sp->free = qla2x00_els_dcmd_sp_free;
2640 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2641 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2642 GFP_KERNEL);
2644 if (!elsio->u.els_logo.els_logo_pyld) {
2645 sp->free(sp);
2646 return QLA_FUNCTION_FAILED;
2649 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2651 elsio->u.els_logo.els_cmd = els_opcode;
2652 logo_pyld.opcode = els_opcode;
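/* Fill in our own 24-bit source ID (al_pa/area/domain) and swap it
 * into big-endian ELS payload order below.
 */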
2653 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2654 logo_pyld.s_id[1] = vha->d_id.b.area;
2655 logo_pyld.s_id[2] = vha->d_id.b.domain;
2656 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2657 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2659 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2660 sizeof(struct els_logo_payload));
2662 rval = qla2x00_start_sp(sp);
2663 if (rval != QLA_SUCCESS) {
2664 sp->free(sp);
2665 return QLA_FUNCTION_FAILED;
2668 ql_dbg(ql_dbg_io, vha, 0x3074,
2669 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2670 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2671 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2673 wait_for_completion(&elsio->u.els_logo.comp);
2680 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2682 scsi_qla_host_t *vha = sp->vha;
2683 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2685 els_iocb->entry_type = ELS_IOCB_TYPE;
2686 els_iocb->entry_count = 1;
2687 els_iocb->sys_define = 0;
2688 els_iocb->entry_status = 0;
2689 els_iocb->handle = sp->handle;
2690 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2691 els_iocb->tx_dsd_count = 1;
2692 els_iocb->vp_index = vha->vp_idx;
2693 els_iocb->sof_type = EST_SOFI3;
2694 els_iocb->rx_dsd_count = 0;
2695 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2697 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2698 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2699 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2700 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2701 els_iocb->s_id[1] = vha->d_id.b.area;
2702 els_iocb->s_id[2] = vha->d_id.b.domain;
2704 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2705 els_iocb->control_flags = 0;
2706 els_iocb->tx_byte_count = els_iocb->tx_len =
2707 cpu_to_le32(sizeof(struct els_plogi_payload));
2708 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2709 &els_iocb->tx_address);
2710 els_iocb->rx_dsd_count = 1;
2711 els_iocb->rx_byte_count = els_iocb->rx_len =
2712 cpu_to_le32(sizeof(struct els_plogi_payload));
2713 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2714 &els_iocb->rx_address);
2716 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2717 "PLOGI ELS IOCB:\n");
2718 ql_dump_buffer(ql_log_info, vha, 0x0109,
2719 (uint8_t *)els_iocb, 0x70);
2720 } else {
2721 els_iocb->control_flags = 1 << 13;
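/* Bit 13 of control_flags appears to select the ELS payload
 * descriptor type (the EPD_* values named in later qla_fw.h
 * revisions); it is left as a magic number in this source.
 */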
2722 els_iocb->tx_byte_count =
2723 cpu_to_le32(sizeof(struct els_logo_payload));
2724 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2725 &els_iocb->tx_address);
2726 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2728 els_iocb->rx_byte_count = 0;
2729 els_iocb->rx_address = 0;
2730 els_iocb->rx_len = 0;
2733 sp->vha->qla_stats.control_requests++;
2737 qla2x00_els_dcmd2_iocb_timeout(void *data)
2739 srb_t *sp = data;
2740 fc_port_t *fcport = sp->fcport;
2741 struct scsi_qla_host *vha = sp->vha;
2742 struct qla_hw_data *ha = vha->hw;
2743 unsigned long flags = 0;
2744 int res;
2746 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2747 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2748 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2750 /* Abort the exchange */
2751 spin_lock_irqsave(&ha->hardware_lock, flags);
2752 res = ha->isp_ops->abort_command(sp);
2753 ql_dbg(ql_dbg_io, vha, 0x3070,
2754 "mbx abort_command %s\n",
2755 (res == QLA_SUCCESS) ? "successful" : "failed");
2756 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2758 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2762 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2764 srb_t *sp = ptr;
2765 fc_port_t *fcport = sp->fcport;
2766 struct srb_iocb *lio = &sp->u.iocb_cmd;
2767 struct scsi_qla_host *vha = sp->vha;
2768 struct event_arg ea;
2769 struct qla_work_evt *e;
2771 ql_dbg(ql_dbg_disc, vha, 0x3072,
2772 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2773 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2775 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2776 del_timer(&sp->u.iocb_cmd.timer);
2778 if (sp->flags & SRB_WAKEUP_ON_COMP)
2779 complete(&lio->u.els_plogi.comp);
2782 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2784 memset(&ea, 0, sizeof(ea));
2787 ea.event = FCME_ELS_PLOGI_DONE;
2788 qla2x00_fcport_event_handler(vha, &ea);
2791 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2792 if (!e) {
2793 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2795 if (elsio->u.els_plogi.els_plogi_pyld)
2796 dma_free_coherent(&sp->vha->hw->pdev->dev,
2797 elsio->u.els_plogi.tx_size,
2798 elsio->u.els_plogi.els_plogi_pyld,
2799 elsio->u.els_plogi.els_plogi_pyld_dma);
2801 if (elsio->u.els_plogi.els_resp_pyld)
2802 dma_free_coherent(&sp->vha->hw->pdev->dev,
2803 elsio->u.els_plogi.rx_size,
2804 elsio->u.els_plogi.els_resp_pyld,
2805 elsio->u.els_plogi.els_resp_pyld_dma);
2806 sp->free(sp);
2808 } else {
2809 e->u.iosb.sp = sp;
2810 qla2x00_post_work(vha, e);
2815 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2816 fc_port_t *fcport, bool wait)
2818 srb_t *sp;
2819 struct srb_iocb *elsio = NULL;
2820 struct qla_hw_data *ha = vha->hw;
2821 int rval = QLA_SUCCESS;
2822 void *ptr, *resp_ptr;
2824 /* Alloc SRB structure */
2825 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2826 if (!sp) {
2827 ql_log(ql_log_info, vha, 0x70e6,
2828 "SRB allocation failed\n");
2832 elsio = &sp->u.iocb_cmd;
2833 ql_dbg(ql_dbg_io, vha, 0x3073,
2834 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2836 fcport->flags |= FCF_ASYNC_SENT;
2837 sp->type = SRB_ELS_DCMD;
2838 sp->name = "ELS_DCMD";
2839 sp->fcport = fcport;
2841 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2842 init_completion(&elsio->u.els_plogi.comp);
2843 if (wait)
2844 sp->flags = SRB_WAKEUP_ON_COMP;
2846 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2848 sp->done = qla2x00_els_dcmd2_sp_done;
2849 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2851 ptr = elsio->u.els_plogi.els_plogi_pyld =
2852 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2853 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2855 if (!elsio->u.els_plogi.els_plogi_pyld) {
2856 rval = QLA_FUNCTION_FAILED;
2857 goto out;
2858 }
2860 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2861 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2862 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2864 if (!elsio->u.els_plogi.els_resp_pyld) {
2865 rval = QLA_FUNCTION_FAILED;
2866 goto out;
2867 }
2869 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2871 memset(ptr, 0, sizeof(struct els_plogi_payload));
2872 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
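/* Reuse the PLOGI service-parameter payload the driver keeps in
 * ha->plogi_els_payld; only the ELS opcode is patched in below.
 */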
2873 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2874 &ha->plogi_els_payld.data,
2875 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2877 elsio->u.els_plogi.els_cmd = els_opcode;
2878 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2880 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2881 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2882 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2884 rval = qla2x00_start_sp(sp);
2885 if (rval != QLA_SUCCESS) {
2886 rval = QLA_FUNCTION_FAILED;
2887 } else {
2888 ql_dbg(ql_dbg_disc, vha, 0x3074,
2889 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2890 sp->name, sp->handle, fcport->loop_id,
2891 fcport->d_id.b24, vha->d_id.b24);
2894 if (wait) {
2895 wait_for_completion(&elsio->u.els_plogi.comp);
2897 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2898 rval = QLA_FUNCTION_FAILED;
2903 out:
2904 fcport->flags &= ~(FCF_ASYNC_SENT);
2905 if (elsio->u.els_plogi.els_plogi_pyld)
2906 dma_free_coherent(&sp->vha->hw->pdev->dev,
2907 elsio->u.els_plogi.tx_size,
2908 elsio->u.els_plogi.els_plogi_pyld,
2909 elsio->u.els_plogi.els_plogi_pyld_dma);
2911 if (elsio->u.els_plogi.els_resp_pyld)
2912 dma_free_coherent(&sp->vha->hw->pdev->dev,
2913 elsio->u.els_plogi.rx_size,
2914 elsio->u.els_plogi.els_resp_pyld,
2915 elsio->u.els_plogi.els_resp_pyld_dma);
2923 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2925 struct bsg_job *bsg_job = sp->u.bsg_job;
2926 struct fc_bsg_request *bsg_request = bsg_job->request;
2928 els_iocb->entry_type = ELS_IOCB_TYPE;
2929 els_iocb->entry_count = 1;
2930 els_iocb->sys_define = 0;
2931 els_iocb->entry_status = 0;
2932 els_iocb->handle = sp->handle;
2933 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2934 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2935 els_iocb->vp_index = sp->vha->vp_idx;
2936 els_iocb->sof_type = EST_SOFI3;
2937 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2939 els_iocb->opcode =
2940 sp->type == SRB_ELS_CMD_RPT ?
2941 bsg_request->rqst_data.r_els.els_code :
2942 bsg_request->rqst_data.h_els.command_code;
2943 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2944 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2945 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2946 els_iocb->control_flags = 0;
2947 els_iocb->rx_byte_count =
2948 cpu_to_le32(bsg_job->reply_payload.payload_len);
2949 els_iocb->tx_byte_count =
2950 cpu_to_le32(bsg_job->request_payload.payload_len);
2952 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2953 &els_iocb->tx_address);
2954 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2955 (bsg_job->request_payload.sg_list));
2957 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2958 &els_iocb->rx_address);
2959 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2960 (bsg_job->reply_payload.sg_list));
2962 sp->vha->qla_stats.control_requests++;
2966 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2968 uint16_t avail_dsds;
2969 struct dsd64 *cur_dsd;
2970 struct scatterlist *sg;
2971 int index;
2972 uint16_t tot_dsds;
2973 scsi_qla_host_t *vha = sp->vha;
2974 struct qla_hw_data *ha = vha->hw;
2975 struct bsg_job *bsg_job = sp->u.bsg_job;
2976 int entry_count = 1;
2978 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2979 ct_iocb->entry_type = CT_IOCB_TYPE;
2980 ct_iocb->entry_status = 0;
2981 ct_iocb->handle1 = sp->handle;
2982 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2983 ct_iocb->status = cpu_to_le16(0);
2984 ct_iocb->control_flags = cpu_to_le16(0);
2985 ct_iocb->timeout = 0;
2986 ct_iocb->cmd_dsd_count =
2987 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2988 ct_iocb->total_dsd_count =
2989 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2990 ct_iocb->req_bytecount =
2991 cpu_to_le32(bsg_job->request_payload.payload_len);
2992 ct_iocb->rsp_bytecount =
2993 cpu_to_le32(bsg_job->reply_payload.payload_len);
2995 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2996 &ct_iocb->req_dsd.address);
2997 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
2999 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3000 &ct_iocb->rsp_dsd.address);
3001 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
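/* The MS IOCB itself holds one request DSD and one response DSD;
 * any additional response segments spill into Continuation Type 1
 * IOCBs below.
 */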
3003 avail_dsds = 1;
3004 cur_dsd = &ct_iocb->rsp_dsd;
3006 tot_dsds = bsg_job->reply_payload.sg_cnt;
3008 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3009 cont_a64_entry_t *cont_pkt;
3011 /* Allocate additional continuation packets? */
3012 if (avail_dsds == 0) {
3013 /*
3014 * Five DSDs are available in the Continuation
3015 * Type 1 IOCB.
3016 */
3017 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3018 vha->hw->req_q_map[0]);
3019 cur_dsd = cont_pkt->dsd;
3024 append_dsd64(&cur_dsd, sg);
3027 ct_iocb->entry_count = entry_count;
3029 sp->vha->qla_stats.control_requests++;
3033 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3035 uint16_t avail_dsds;
3036 struct dsd64 *cur_dsd;
3037 struct scatterlist *sg;
3038 int index;
3039 uint16_t cmd_dsds, rsp_dsds;
3040 scsi_qla_host_t *vha = sp->vha;
3041 struct qla_hw_data *ha = vha->hw;
3042 struct bsg_job *bsg_job = sp->u.bsg_job;
3043 int entry_count = 1;
3044 cont_a64_entry_t *cont_pkt = NULL;
3046 ct_iocb->entry_type = CT_IOCB_TYPE;
3047 ct_iocb->entry_status = 0;
3048 ct_iocb->sys_define = 0;
3049 ct_iocb->handle = sp->handle;
3051 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3052 ct_iocb->vp_index = sp->vha->vp_idx;
3053 ct_iocb->comp_status = cpu_to_le16(0);
3055 cmd_dsds = bsg_job->request_payload.sg_cnt;
3056 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3058 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3059 ct_iocb->timeout = 0;
3060 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3061 ct_iocb->cmd_byte_count =
3062 cpu_to_le32(bsg_job->request_payload.payload_len);
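/* The CT IOCB provides two inline DSDs (ct_iocb->dsd); command and
 * response scatter/gather entries consume those first, then
 * overflow into Continuation Type 1 IOCBs.
 */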
3064 avail_dsds = 2;
3065 cur_dsd = ct_iocb->dsd;
3068 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3069 /* Allocate additional continuation packets? */
3070 if (avail_dsds == 0) {
3071 /*
3072 * Five DSDs are available in the Continuation
3073 * Type 1 IOCB.
3074 */
3075 cont_pkt = qla2x00_prep_cont_type1_iocb(
3076 vha, ha->req_q_map[0]);
3077 cur_dsd = cont_pkt->dsd;
3082 append_dsd64(&cur_dsd, sg);
3088 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3089 /* Allocate additional continuation packets? */
3090 if (avail_dsds == 0) {
3091 /*
3092 * Five DSDs are available in the Continuation
3093 * Type 1 IOCB.
3094 */
3095 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3096 ha->req_q_map[0]);
3097 cur_dsd = cont_pkt->dsd;
3102 append_dsd64(&cur_dsd, sg);
3105 ct_iocb->entry_count = entry_count;
3109 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3110 * @sp: command to send to the ISP
3112 * Returns non-zero if a failure occurred, else zero.
3115 qla82xx_start_scsi(srb_t *sp)
3118 unsigned long flags;
3119 struct scsi_cmnd *cmd;
3126 struct device_reg_82xx __iomem *reg;
3129 uint8_t additional_cdb_len;
3130 struct ct6_dsd *ctx;
3131 struct scsi_qla_host *vha = sp->vha;
3132 struct qla_hw_data *ha = vha->hw;
3133 struct req_que *req = NULL;
3134 struct rsp_que *rsp = NULL;
3136 /* Setup device pointers. */
3137 reg = &ha->iobase->isp82;
3138 cmd = GET_CMD_SP(sp);
3139 req = vha->req;
3140 rsp = ha->rsp_q_map[0];
3142 /* So we know we haven't pci_map'ed anything yet */
3145 dbval = 0x04 | (ha->portnum << 5);
3147 /* Send marker if required */
3148 if (vha->marker_needed != 0) {
3149 if (qla2x00_marker(vha, ha->base_qpair,
3150 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3151 ql_log(ql_log_warn, vha, 0x300c,
3152 "qla2x00_marker failed for cmd=%p.\n", cmd);
3153 return QLA_FUNCTION_FAILED;
3155 vha->marker_needed = 0;
3158 /* Acquire ring specific lock */
3159 spin_lock_irqsave(&ha->hardware_lock, flags);
3161 /* Check for room in outstanding command list. */
3162 handle = req->current_outstanding_cmd;
3163 for (index = 1; index < req->num_outstanding_cmds; index++) {
3164 handle++;
3165 if (handle == req->num_outstanding_cmds)
3166 handle = 1;
3167 if (!req->outstanding_cmds[handle])
3168 break;
3169 }
3170 if (index == req->num_outstanding_cmds)
3171 goto queuing_error;
3173 /* Map the sg table so we have an accurate count of sg entries needed */
3174 if (scsi_sg_count(cmd)) {
3175 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3176 scsi_sg_count(cmd), cmd->sc_data_direction);
3177 if (unlikely(!nseg))
3178 goto queuing_error;
3179 } else
3180 nseg = 0;
3182 tot_dsds = nseg;
3184 if (tot_dsds > ql2xshiftctondsd) {
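/* Large transfers (more than ql2xshiftctondsd segments) use
 * Command Type 6, which references chained external DSD lists and
 * a separately DMA-mapped FCP_CMND; smaller transfers fall through
 * to the inline-DSD Command Type 7 path below.
 */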
3185 struct cmd_type_6 *cmd_pkt;
3186 uint16_t more_dsd_lists = 0;
3187 struct dsd_dma *dsd_ptr;
3190 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3191 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3192 ql_dbg(ql_dbg_io, vha, 0x300d,
3193 "Num of DSD lists %d is more than %d for cmd=%p.\n",
3194 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3199 if (more_dsd_lists <= ha->gbl_dsd_avail)
3200 goto sufficient_dsds;
3202 more_dsd_lists -= ha->gbl_dsd_avail;
3204 for (i = 0; i < more_dsd_lists; i++) {
3205 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3206 if (!dsd_ptr) {
3207 ql_log(ql_log_fatal, vha, 0x300e,
3208 "Failed to allocate memory for dsd_dma "
3209 "for cmd=%p.\n", cmd);
3213 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3214 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3215 if (!dsd_ptr->dsd_addr) {
3216 kfree(dsd_ptr);
3217 ql_log(ql_log_fatal, vha, 0x300f,
3218 "Failed to allocate memory for dsd_addr "
3219 "for cmd=%p.\n", cmd);
3222 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3223 ha->gbl_dsd_avail++;
3224 }
3226 sufficient_dsds:
3228 req_cnt = 1;
3229 if (req->cnt < (req_cnt + 2)) {
3230 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3231 &reg->req_q_out[0]);
3232 if (req->ring_index < cnt)
3233 req->cnt = cnt - req->ring_index;
3234 else
3235 req->cnt = req->length -
3236 (req->ring_index - cnt);
3237 if (req->cnt < (req_cnt + 2))
3238 goto queuing_error;
3241 ctx = sp->u.scmd.ctx =
3242 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3243 if (!ctx) {
3244 ql_log(ql_log_fatal, vha, 0x3010,
3245 "Failed to allocate ctx for cmd=%p.\n", cmd);
3249 memset(ctx, 0, sizeof(struct ct6_dsd));
3250 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3251 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3252 if (!ctx->fcp_cmnd) {
3253 ql_log(ql_log_fatal, vha, 0x3011,
3254 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3258 /* Initialize the DSD list and dma handle */
3259 INIT_LIST_HEAD(&ctx->dsd_list);
3260 ctx->dsd_use_cnt = 0;
3262 if (cmd->cmd_len > 16) {
3263 additional_cdb_len = cmd->cmd_len - 16;
3264 if ((cmd->cmd_len % 4) != 0) {
3265 /* SCSI command bigger than 16 bytes must be
3266 * a multiple of 4
3267 */
3268 ql_log(ql_log_warn, vha, 0x3012,
3269 "scsi cmd len %d not multiple of 4 "
3270 "for cmd=%p.\n", cmd->cmd_len, cmd);
3271 goto queuing_error_fcp_cmnd;
3273 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3274 } else {
3275 additional_cdb_len = 0;
3276 ctx->fcp_cmnd_len = 12 + 16 + 4;
3277 }
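/* FCP_CMND sizing: 12 bytes of fixed header (the 8-byte LUN plus
 * the CRN, task attribute, task management and additional-CDB-length
 * bytes), the CDB itself, and the trailing 4-byte FCP_DL
 * data-length field.
 */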
3279 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3280 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3282 /* Zero out remaining portion of packet. */
3283 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3284 clr_ptr = (uint32_t *)cmd_pkt + 2;
3285 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3286 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3288 /* Set NPORT-ID and LUN number*/
3289 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3290 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3291 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3292 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3293 cmd_pkt->vp_index = sp->vha->vp_idx;
3295 /* Build IOCB segments */
3296 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3297 goto queuing_error_fcp_cmnd;
3299 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3300 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3302 /* build FCP_CMND IU */
3303 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3304 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
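/* In FCP_CMND the additional-CDB-length byte also carries the
 * write/read direction bits (WRDATA/RDDATA) in its low two bits,
 * hence the |= 1 and |= 2 below.
 */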
3306 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3307 ctx->fcp_cmnd->additional_cdb_len |= 1;
3308 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3309 ctx->fcp_cmnd->additional_cdb_len |= 2;
3311 /* Populate the FCP_PRIO. */
3312 if (ha->flags.fcp_prio_enabled)
3313 ctx->fcp_cmnd->task_attribute |=
3314 sp->fcport->fcp_prio << 3;
3316 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3318 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3319 additional_cdb_len);
3320 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3322 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3323 put_unaligned_le64(ctx->fcp_cmnd_dma,
3324 &cmd_pkt->fcp_cmnd_dseg_address);
3326 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3327 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3328 /* Set total data segment count. */
3329 cmd_pkt->entry_count = (uint8_t)req_cnt;
3330 /* Specify response queue number where
3331 * completion should happen
3332 */
3333 cmd_pkt->entry_status = (uint8_t) rsp->id;
3334 } else {
3335 struct cmd_type_7 *cmd_pkt;
3337 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3338 if (req->cnt < (req_cnt + 2)) {
3339 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3340 &reg->req_q_out[0]);
3341 if (req->ring_index < cnt)
3342 req->cnt = cnt - req->ring_index;
3343 else
3344 req->cnt = req->length -
3345 (req->ring_index - cnt);
3347 if (req->cnt < (req_cnt + 2))
3348 goto queuing_error;
3350 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3351 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3353 /* Zero out remaining portion of packet. */
3354 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3355 clr_ptr = (uint32_t *)cmd_pkt + 2;
3356 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3357 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3359 /* Set NPORT-ID and LUN number*/
3360 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3361 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3362 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3363 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3364 cmd_pkt->vp_index = sp->vha->vp_idx;
3366 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3367 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3368 sizeof(cmd_pkt->lun));
3370 /* Populate the FCP_PRIO. */
3371 if (ha->flags.fcp_prio_enabled)
3372 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3374 /* Load SCSI command packet. */
3375 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3376 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3378 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3380 /* Build IOCB segments */
3381 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3383 /* Set total data segment count. */
3384 cmd_pkt->entry_count = (uint8_t)req_cnt;
3385 /* Specify response queue number where
3386 * completion should happen.
3387 */
3388 cmd_pkt->entry_status = (uint8_t) rsp->id;
3391 /* Build command packet. */
3392 req->current_outstanding_cmd = handle;
3393 req->outstanding_cmds[handle] = sp;
3394 sp->handle = handle;
3395 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3396 req->cnt -= req_cnt;
3399 /* Adjust ring index. */
3400 req->ring_index++;
3401 if (req->ring_index == req->length) {
3402 req->ring_index = 0;
3403 req->ring_ptr = req->ring;
3407 sp->flags |= SRB_DMA_VALID;
3409 /* Set chip new ring index. */
3410 /* write, read and verify logic */
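/* P3P (ISP82xx) doorbell: dbval encodes the queue id and the new
 * ring index; the value is either posted through the CRB side-band
 * write or written directly and re-read until the hardware
 * observes it.
 */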
3411 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3413 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3415 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3417 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3418 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3423 /* Manage unprocessed RIO/ZIO commands in response queue. */
3424 if (vha->flags.process_response_queue &&
3425 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3426 qla24xx_process_response_queue(vha, rsp);
3428 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3431 queuing_error_fcp_cmnd:
3432 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3433 queuing_error:
3434 if (tot_dsds)
3435 scsi_dma_unmap(cmd);
3437 if (sp->u.scmd.ctx) {
3438 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3439 sp->u.scmd.ctx = NULL;
3441 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3443 return QLA_FUNCTION_FAILED;
3447 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3449 struct srb_iocb *aio = &sp->u.iocb_cmd;
3450 scsi_qla_host_t *vha = sp->vha;
3451 struct req_que *req = sp->qpair->req;
3453 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3454 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3455 abt_iocb->entry_count = 1;
3456 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3458 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3459 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3460 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3461 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
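/* handle_to_abort identifies the victim command: MAKE_HANDLE()
 * packs its request-queue number into the upper 16 bits and its
 * outstanding-command handle into the lower 16.
 */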
3463 abt_iocb->handle_to_abort =
3464 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3465 aio->u.abt.cmd_hndl));
3466 abt_iocb->vp_index = vha->vp_idx;
3467 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3468 /* Send the command to the firmware */
3473 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3475 int i, sz;
3477 mbx->entry_type = MBX_IOCB_TYPE;
3478 mbx->handle = sp->handle;
3479 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3481 for (i = 0; i < sz; i++)
3482 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3486 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3488 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3489 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3490 ct_pkt->handle = sp->handle;
3493 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3494 struct nack_to_isp *nack)
3496 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3498 nack->entry_type = NOTIFY_ACK_TYPE;
3499 nack->entry_count = 1;
3500 nack->ox_id = ntfy->ox_id;
3502 nack->u.isp24.handle = sp->handle;
3503 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3504 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3505 nack->u.isp24.flags = ntfy->u.isp24.flags &
3506 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3508 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3509 nack->u.isp24.status = ntfy->u.isp24.status;
3510 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3511 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3512 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3513 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3514 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3515 nack->u.isp24.srr_flags = 0;
3516 nack->u.isp24.srr_reject_code = 0;
3517 nack->u.isp24.srr_reject_code_expl = 0;
3518 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3521 /*
3522 * Build NVME LS request
3523 */
3525 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3527 struct srb_iocb *nvme;
3528 int rval = QLA_SUCCESS;
3530 nvme = &sp->u.iocb_cmd;
3531 cmd_pkt->entry_type = PT_LS4_REQUEST;
3532 cmd_pkt->entry_count = 1;
3533 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3535 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3536 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3537 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
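/* A pass-through LS4 request carries exactly one transmit DSD (the
 * NVMe-LS command buffer) and one receive DSD (the response
 * buffer), set up below.
 */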
3539 cmd_pkt->tx_dseg_count = 1;
3540 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3541 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3542 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3544 cmd_pkt->rx_dseg_count = 1;
3545 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3546 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
3547 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3553 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3555 int map, pos;
3557 vce->entry_type = VP_CTRL_IOCB_TYPE;
3558 vce->handle = sp->handle;
3559 vce->entry_count = 1;
3560 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3561 vce->vp_count = cpu_to_le16(1);
3563 /*
3564 * index map in firmware starts with 1; decrement index
3565 * this is ok as we never use index 0
3566 */
3567 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3568 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3569 vce->vp_idx_map[map] |= 1 << pos;
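/* Example: vp_index 10 maps to overall bit 9, so byte 9 / 8 = 1
 * and bit 9 & 7 = 1, i.e. vp_idx_map[1] |= 0x02.
 */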
3573 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3575 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3576 logio->control_flags =
3577 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3579 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3580 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3581 logio->port_id[1] = sp->fcport->d_id.b.area;
3582 logio->port_id[2] = sp->fcport->d_id.b.domain;
3583 logio->vp_index = sp->fcport->vha->vp_idx;
3587 qla2x00_start_sp(srb_t *sp)
3589 int rval = QLA_SUCCESS;
3590 scsi_qla_host_t *vha = sp->vha;
3591 struct qla_hw_data *ha = vha->hw;
3592 struct qla_qpair *qp = sp->qpair;
3594 unsigned long flags;
3596 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3597 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3598 if (!pkt) {
3599 rval = QLA_FUNCTION_FAILED;
3600 ql_log(ql_log_warn, vha, 0x700c,
3601 "qla2x00_alloc_iocbs failed.\n");
3602 goto done;
3603 }
3605 switch (sp->type) {
3606 case SRB_LOGIN_CMD:
3607 IS_FWI2_CAPABLE(ha) ?
3608 qla24xx_login_iocb(sp, pkt) :
3609 qla2x00_login_iocb(sp, pkt);
3610 break;
3611 case SRB_PRLI_CMD:
3612 qla24xx_prli_iocb(sp, pkt);
3613 break;
3614 case SRB_LOGOUT_CMD:
3615 IS_FWI2_CAPABLE(ha) ?
3616 qla24xx_logout_iocb(sp, pkt) :
3617 qla2x00_logout_iocb(sp, pkt);
3618 break;
3619 case SRB_ELS_CMD_RPT:
3620 case SRB_ELS_CMD_HST:
3621 qla24xx_els_iocb(sp, pkt);
3622 break;
3623 case SRB_CT_CMD:
3624 IS_FWI2_CAPABLE(ha) ?
3625 qla24xx_ct_iocb(sp, pkt) :
3626 qla2x00_ct_iocb(sp, pkt);
3627 break;
3628 case SRB_ADISC_CMD:
3629 IS_FWI2_CAPABLE(ha) ?
3630 qla24xx_adisc_iocb(sp, pkt) :
3631 qla2x00_adisc_iocb(sp, pkt);
3632 break;
3633 case SRB_TM_CMD:
3634 IS_QLAFX00(ha) ?
3635 qlafx00_tm_iocb(sp, pkt) :
3636 qla24xx_tm_iocb(sp, pkt);
3637 break;
3638 case SRB_FXIOCB_DCMD:
3639 case SRB_FXIOCB_BCMD:
3640 qlafx00_fxdisc_iocb(sp, pkt);
3641 break;
3642 case SRB_NVME_LS:
3643 qla_nvme_ls(sp, pkt);
3644 break;
3645 case SRB_ABT_CMD:
3646 IS_QLAFX00(ha) ?
3647 qlafx00_abort_iocb(sp, pkt) :
3648 qla24xx_abort_iocb(sp, pkt);
3649 break;
3650 case SRB_ELS_DCMD:
3651 qla24xx_els_logo_iocb(sp, pkt);
3652 break;
3653 case SRB_CT_PTHRU_CMD:
3654 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3655 break;
3656 case SRB_MB_IOCB:
3657 qla2x00_mb_iocb(sp, pkt);
3658 break;
3659 case SRB_NACK_PLOGI:
3660 case SRB_NACK_PRLI:
3661 case SRB_NACK_LOGO:
3662 qla2x00_send_notify_ack_iocb(sp, pkt);
3663 break;
3664 case SRB_CTRL_VP:
3665 qla25xx_ctrlvp_iocb(sp, pkt);
3666 break;
3667 case SRB_PRLO_CMD:
3668 qla24xx_prlo_iocb(sp, pkt);
3669 break;
3670 default:
3671 break;
3672 }
3674 if (sp->start_timer)
3675 add_timer(&sp->u.iocb_cmd.timer);
3678 qla2x00_start_iocbs(vha, qp->req);
3679 done:
3680 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3681 return rval;
3685 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3686 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3688 uint16_t avail_dsds;
3689 struct dsd64 *cur_dsd;
3690 uint32_t req_data_len = 0;
3691 uint32_t rsp_data_len = 0;
3692 struct scatterlist *sg;
3693 int index;
3694 int entry_count = 1;
3695 struct bsg_job *bsg_job = sp->u.bsg_job;
3697 /* Update entry type to indicate bidir command */
3698 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3700 /* Set the transfer direction; for bidirectional commands both flags
3701 * are set. Also set the BD_WRAP_BACK flag: the firmware takes care of
3702 * assigning DID=SID for outgoing packets.
3703 */
3704 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3705 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3706 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3709 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3710 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3711 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3712 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3714 vha->bidi_stats.transfer_bytes += req_data_len;
3715 vha->bidi_stats.io_count++;
3717 vha->qla_stats.output_bytes += req_data_len;
3718 vha->qla_stats.output_requests++;
3720 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3721 * are bundled in continuation iocb
3722 */
3723 avail_dsds = 1;
3724 cur_dsd = &cmd_pkt->fcp_dsd;
3728 for_each_sg(bsg_job->request_payload.sg_list, sg,
3729 bsg_job->request_payload.sg_cnt, index) {
3730 cont_a64_entry_t *cont_pkt;
3732 /* Allocate additional continuation packets */
3733 if (avail_dsds == 0) {
3734 /* Continuation Type 1 IOCB can accommodate
3735 * five DSDs.
3736 */
3737 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3738 cur_dsd = cont_pkt->dsd;
3742 append_dsd64(&cur_dsd, sg);
3745 /* For a read request the DSDs always go to a continuation IOCB,
3746 * following the write DSDs. If there is room on the current IOCB
3747 * then they are added to that IOCB; otherwise a new continuation
3748 * IOCB is allocated.
3749 */
3750 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3751 bsg_job->reply_payload.sg_cnt, index) {
3752 cont_a64_entry_t *cont_pkt;
3754 /* Allocate additional continuation packets */
3755 if (avail_dsds == 0) {
3756 /* Continuation Type 1 IOCB can accommodate
3757 * five DSDs.
3758 */
3759 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3760 cur_dsd = cont_pkt->dsd;
3764 append_dsd64(&cur_dsd, sg);
3767 /* This value should be same as number of IOCB required for this cmd */
3768 cmd_pkt->entry_count = entry_count;
3772 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3775 struct qla_hw_data *ha = vha->hw;
3776 unsigned long flags;
3782 struct cmd_bidir *cmd_pkt = NULL;
3783 struct rsp_que *rsp;
3784 struct req_que *req;
3785 int rval = EXT_STATUS_OK;
3788 req = vha->req;
3789 rsp = ha->rsp_q_map[0];
3792 /* Send marker if required */
3793 if (vha->marker_needed != 0) {
3794 if (qla2x00_marker(vha, ha->base_qpair,
3795 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3796 return EXT_STATUS_MAILBOX;
3797 vha->marker_needed = 0;
3800 /* Acquire ring specific lock */
3801 spin_lock_irqsave(&ha->hardware_lock, flags);
3803 /* Check for room in outstanding command list. */
3804 handle = req->current_outstanding_cmd;
3805 for (index = 1; index < req->num_outstanding_cmds; index++) {
3806 handle++;
3807 if (handle == req->num_outstanding_cmds)
3808 handle = 1;
3809 if (!req->outstanding_cmds[handle])
3810 break;
3811 }
3813 if (index == req->num_outstanding_cmds) {
3814 rval = EXT_STATUS_BUSY;
3815 goto queuing_error;
3816 }
3818 /* Calculate number of IOCB required */
3819 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3821 /* Check for room on request queue. */
3822 if (req->cnt < req_cnt + 2) {
3823 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3824 RD_REG_DWORD_RELAXED(req->req_q_out);
3825 if (req->ring_index < cnt)
3826 req->cnt = cnt - req->ring_index;
3827 else
3828 req->cnt = req->length -
3829 (req->ring_index - cnt);
3831 if (req->cnt < req_cnt + 2) {
3832 rval = EXT_STATUS_BUSY;
3833 goto queuing_error;
3834 }
3836 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3837 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3839 /* Zero out remaining portion of packet. */
3840 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3841 clr_ptr = (uint32_t *)cmd_pkt + 2;
3842 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3844 /* Set NPORT-ID (of vha)*/
3845 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3846 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3847 cmd_pkt->port_id[1] = vha->d_id.b.area;
3848 cmd_pkt->port_id[2] = vha->d_id.b.domain;
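/* Bidirectional pass-through wraps back to the initiating port, so
 * the IOCB is addressed with the host's own self-login loop ID and
 * D_ID; the firmware fixes up DID=SID on the wire.
 */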
3850 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3851 cmd_pkt->entry_status = (uint8_t) rsp->id;
3852 /* Build command packet. */
3853 req->current_outstanding_cmd = handle;
3854 req->outstanding_cmds[handle] = sp;
3855 sp->handle = handle;
3856 req->cnt -= req_cnt;
3858 /* Send the command to the firmware */
3860 qla2x00_start_iocbs(vha, req);
3862 spin_unlock_irqrestore(&ha->hardware_lock, flags);