2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
26 const char *const port_state_str[] = {
34 static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt,
35 void (*process_item)(struct scsi_qla_host *vha, void *pkt))
37 struct purex_list *list = &vha->purex_list;
38 struct purex_item *item;
41 item = kzalloc(sizeof(*item), GFP_KERNEL);
43 ql_log(ql_log_warn, vha, 0x5092,
44 ">> Failed allocate purex list item.\n");
49 item->process_item = process_item;
50 memcpy(&item->iocb, pkt, sizeof(item->iocb));
52 spin_lock_irqsave(&list->lock, flags);
53 list_add_tail(&item->list, &list->head);
54 spin_unlock_irqrestore(&list->lock, flags);
56 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
60 qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
62 struct abts_entry_24xx *abts = pkt;
63 struct qla_hw_data *ha = vha->hw;
64 struct els_entry_24xx *rsp_els;
65 struct abts_entry_24xx *abts_rsp;
70 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
72 ql_log(ql_log_warn, vha, 0x0287,
73 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
74 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
75 abts->seq_id, abts->seq_cnt);
76 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
77 "-------- ABTS RCV -------\n");
78 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
79 (uint8_t *)abts, sizeof(*abts));
81 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
84 ql_log(ql_log_warn, vha, 0x0287,
85 "Failed allocate dma buffer ABTS/ELS RSP.\n");
89 /* terminate exchange */
90 rsp_els->entry_type = ELS_IOCB_TYPE;
91 rsp_els->entry_count = 1;
92 rsp_els->nport_handle = cpu_to_le16(~0);
93 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
94 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
95 ql_dbg(ql_dbg_init, vha, 0x0283,
96 "Sending ELS Response to terminate exchange %#x...\n",
97 abts->rx_xch_addr_to_abort);
98 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
99 "-------- ELS RSP -------\n");
100 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
101 (uint8_t *)rsp_els, sizeof(*rsp_els));
102 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
104 ql_log(ql_log_warn, vha, 0x0288,
105 "%s: iocb failed to execute -> %x\n", __func__, rval);
106 } else if (rsp_els->comp_status) {
107 ql_log(ql_log_warn, vha, 0x0289,
108 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
109 __func__, rsp_els->comp_status,
110 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
112 ql_dbg(ql_dbg_init, vha, 0x028a,
113 "%s: abort exchange done.\n", __func__);
116 /* send ABTS response */
117 abts_rsp = (void *)rsp_els;
118 memset(abts_rsp, 0, sizeof(*abts_rsp));
119 abts_rsp->entry_type = ABTS_RSP_TYPE;
120 abts_rsp->entry_count = 1;
121 abts_rsp->nport_handle = abts->nport_handle;
122 abts_rsp->vp_idx = abts->vp_idx;
123 abts_rsp->sof_type = abts->sof_type & 0xf0;
124 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
125 abts_rsp->d_id[0] = abts->s_id[0];
126 abts_rsp->d_id[1] = abts->s_id[1];
127 abts_rsp->d_id[2] = abts->s_id[2];
128 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
129 abts_rsp->s_id[0] = abts->d_id[0];
130 abts_rsp->s_id[1] = abts->d_id[1];
131 abts_rsp->s_id[2] = abts->d_id[2];
132 abts_rsp->cs_ctl = abts->cs_ctl;
133 /* include flipping bit23 in fctl */
134 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
135 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
136 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
137 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
138 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
139 abts_rsp->type = FC_TYPE_BLD;
140 abts_rsp->rx_id = abts->rx_id;
141 abts_rsp->ox_id = abts->ox_id;
142 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
143 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
144 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
145 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
146 ql_dbg(ql_dbg_init, vha, 0x028b,
147 "Sending BA ACC response to ABTS %#x...\n",
148 abts->rx_xch_addr_to_abort);
149 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
150 "-------- ELS RSP -------\n");
151 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
152 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
153 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
155 ql_log(ql_log_warn, vha, 0x028c,
156 "%s: iocb failed to execute -> %x\n", __func__, rval);
157 } else if (abts_rsp->comp_status) {
158 ql_log(ql_log_warn, vha, 0x028d,
159 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
160 __func__, abts_rsp->comp_status,
161 abts_rsp->payload.error.subcode1,
162 abts_rsp->payload.error.subcode2);
164 ql_dbg(ql_dbg_init, vha, 0x028ea,
165 "%s: done.\n", __func__);
168 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
172 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
173 * @irq: interrupt number
174 * @dev_id: SCSI driver HA context
176 * Called by system whenever the host adapter generates an interrupt.
178 * Returns handled flag.
181 qla2100_intr_handler(int irq, void *dev_id)
183 scsi_qla_host_t *vha;
184 struct qla_hw_data *ha;
185 struct device_reg_2xxx __iomem *reg;
193 rsp = (struct rsp_que *) dev_id;
195 ql_log(ql_log_info, NULL, 0x505d,
196 "%s: NULL response queue pointer.\n", __func__);
201 reg = &ha->iobase->isp;
204 spin_lock_irqsave(&ha->hardware_lock, flags);
205 vha = pci_get_drvdata(ha->pdev);
206 for (iter = 50; iter--; ) {
207 hccr = rd_reg_word(®->hccr);
208 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
210 if (hccr & HCCR_RISC_PAUSE) {
211 if (pci_channel_offline(ha->pdev))
215 * Issue a "HARD" reset in order for the RISC interrupt
216 * bit to be cleared. Schedule a big hammer to get
217 * out of the RISC PAUSED state.
219 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
220 rd_reg_word(®->hccr);
222 ha->isp_ops->fw_dump(vha);
223 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
225 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0)
228 if (rd_reg_word(®->semaphore) & BIT_0) {
229 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
230 rd_reg_word(®->hccr);
232 /* Get mailbox data. */
233 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
234 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
235 qla2x00_mbx_completion(vha, mb[0]);
236 status |= MBX_INTERRUPT;
237 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
238 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
239 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
240 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
241 qla2x00_async_event(vha, rsp, mb);
244 ql_dbg(ql_dbg_async, vha, 0x5025,
245 "Unrecognized interrupt type (%d).\n",
248 /* Release mailbox registers. */
249 wrt_reg_word(®->semaphore, 0);
250 rd_reg_word(®->semaphore);
252 qla2x00_process_response_queue(rsp);
254 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
255 rd_reg_word(®->hccr);
258 qla2x00_handle_mbx_completion(ha, status);
259 spin_unlock_irqrestore(&ha->hardware_lock, flags);
261 return (IRQ_HANDLED);
265 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
267 /* Check for PCI disconnection */
268 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
269 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
270 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
271 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
273 * Schedule this (only once) on the default system
274 * workqueue so that all the adapter workqueues and the
275 * DPC thread can be shutdown cleanly.
277 schedule_work(&vha->hw->board_disable);
285 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
287 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
291 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
292 * @irq: interrupt number
293 * @dev_id: SCSI driver HA context
295 * Called by system whenever the host adapter generates an interrupt.
297 * Returns handled flag.
300 qla2300_intr_handler(int irq, void *dev_id)
302 scsi_qla_host_t *vha;
303 struct device_reg_2xxx __iomem *reg;
310 struct qla_hw_data *ha;
313 rsp = (struct rsp_que *) dev_id;
315 ql_log(ql_log_info, NULL, 0x5058,
316 "%s: NULL response queue pointer.\n", __func__);
321 reg = &ha->iobase->isp;
324 spin_lock_irqsave(&ha->hardware_lock, flags);
325 vha = pci_get_drvdata(ha->pdev);
326 for (iter = 50; iter--; ) {
327 stat = rd_reg_dword(®->u.isp2300.host_status);
328 if (qla2x00_check_reg32_for_disconnect(vha, stat))
330 if (stat & HSR_RISC_PAUSED) {
331 if (unlikely(pci_channel_offline(ha->pdev)))
334 hccr = rd_reg_word(®->hccr);
336 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
337 ql_log(ql_log_warn, vha, 0x5026,
338 "Parity error -- HCCR=%x, Dumping "
339 "firmware.\n", hccr);
341 ql_log(ql_log_warn, vha, 0x5027,
342 "RISC paused -- HCCR=%x, Dumping "
343 "firmware.\n", hccr);
346 * Issue a "HARD" reset in order for the RISC
347 * interrupt bit to be cleared. Schedule a big
348 * hammer to get out of the RISC PAUSED state.
350 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
351 rd_reg_word(®->hccr);
353 ha->isp_ops->fw_dump(vha);
354 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
356 } else if ((stat & HSR_RISC_INT) == 0)
359 switch (stat & 0xff) {
364 qla2x00_mbx_completion(vha, MSW(stat));
365 status |= MBX_INTERRUPT;
367 /* Release mailbox registers. */
368 wrt_reg_word(®->semaphore, 0);
372 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
373 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
374 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
375 qla2x00_async_event(vha, rsp, mb);
378 qla2x00_process_response_queue(rsp);
381 mb[0] = MBA_CMPLT_1_16BIT;
383 qla2x00_async_event(vha, rsp, mb);
386 mb[0] = MBA_SCSI_COMPLETION;
388 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
389 qla2x00_async_event(vha, rsp, mb);
392 ql_dbg(ql_dbg_async, vha, 0x5028,
393 "Unrecognized interrupt type (%d).\n", stat & 0xff);
396 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
397 rd_reg_word_relaxed(®->hccr);
399 qla2x00_handle_mbx_completion(ha, status);
400 spin_unlock_irqrestore(&ha->hardware_lock, flags);
402 return (IRQ_HANDLED);
406 * qla2x00_mbx_completion() - Process mailbox command completions.
407 * @vha: SCSI driver HA context
408 * @mb0: Mailbox0 register
411 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
415 __le16 __iomem *wptr;
416 struct qla_hw_data *ha = vha->hw;
417 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
419 /* Read all mbox registers? */
420 WARN_ON_ONCE(ha->mbx_count > 32);
421 mboxes = (1ULL << ha->mbx_count) - 1;
423 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
425 mboxes = ha->mcp->in_mb;
427 /* Load return mailbox registers. */
428 ha->flags.mbox_int = 1;
429 ha->mailbox_out[0] = mb0;
431 wptr = MAILBOX_REG(ha, reg, 1);
433 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
434 if (IS_QLA2200(ha) && cnt == 8)
435 wptr = MAILBOX_REG(ha, reg, 8);
436 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
437 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
438 else if (mboxes & BIT_0)
439 ha->mailbox_out[cnt] = rd_reg_word(wptr);
447 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
449 static char *event[] =
450 { "Complete", "Request Notification", "Time Extension" };
452 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
453 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
454 __le16 __iomem *wptr;
455 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
457 /* Seed data -- mailbox1 -> mailbox7. */
458 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
459 wptr = ®24->mailbox1;
460 else if (IS_QLA8044(vha->hw))
461 wptr = ®82->mailbox_out[1];
465 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
466 mb[cnt] = rd_reg_word(wptr);
468 ql_dbg(ql_dbg_async, vha, 0x5021,
469 "Inter-Driver Communication %s -- "
470 "%04x %04x %04x %04x %04x %04x %04x.\n",
471 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
472 mb[4], mb[5], mb[6]);
474 /* Handle IDC Error completion case. */
475 case MBA_IDC_COMPLETE:
477 vha->hw->flags.idc_compl_status = 1;
478 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
479 complete(&vha->hw->dcbx_comp);
484 /* Acknowledgement needed? [Notify && non-zero timeout]. */
485 timeout = (descr >> 8) & 0xf;
486 ql_dbg(ql_dbg_async, vha, 0x5022,
487 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
488 vha->host_no, event[aen & 0xff], timeout);
492 rval = qla2x00_post_idc_ack_work(vha, mb);
493 if (rval != QLA_SUCCESS)
494 ql_log(ql_log_warn, vha, 0x5023,
495 "IDC failed to post ACK.\n");
497 case MBA_IDC_TIME_EXT:
498 vha->hw->idc_extend_tmo = descr;
499 ql_dbg(ql_dbg_async, vha, 0x5087,
500 "%lu Inter-Driver Communication %s -- "
501 "Extend timeout by=%d.\n",
502 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
509 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
511 static const char *const link_speeds[] = {
512 "1", "2", "?", "4", "8", "16", "32", "10"
514 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
516 if (IS_QLA2100(ha) || IS_QLA2200(ha))
517 return link_speeds[0];
518 else if (speed == 0x13)
519 return link_speeds[QLA_LAST_SPEED];
520 else if (speed < QLA_LAST_SPEED)
521 return link_speeds[speed];
523 return link_speeds[LS_UNKNOWN];
527 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
529 struct qla_hw_data *ha = vha->hw;
532 * 8200 AEN Interpretation:
534 * mb[1] = AEN Reason code
535 * mb[2] = LSW of Peg-Halt Status-1 Register
536 * mb[6] = MSW of Peg-Halt Status-1 Register
537 * mb[3] = LSW of Peg-Halt Status-2 register
538 * mb[7] = MSW of Peg-Halt Status-2 register
539 * mb[4] = IDC Device-State Register value
540 * mb[5] = IDC Driver-Presence Register value
542 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
543 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
544 mb[0], mb[1], mb[2], mb[6]);
545 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
546 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
547 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
549 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
550 IDC_HEARTBEAT_FAILURE)) {
551 ha->flags.nic_core_hung = 1;
552 ql_log(ql_log_warn, vha, 0x5060,
553 "83XX: F/W Error Reported: Check if reset required.\n");
555 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
556 uint32_t protocol_engine_id, fw_err_code, err_level;
559 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
560 * - PEG-Halt Status-1 Register:
561 * (LSW = mb[2], MSW = mb[6])
562 * Bits 0-7 = protocol-engine ID
563 * Bits 8-28 = f/w error code
564 * Bits 29-31 = Error-level
565 * Error-level 0x1 = Non-Fatal error
566 * Error-level 0x2 = Recoverable Fatal error
567 * Error-level 0x4 = UnRecoverable Fatal error
568 * - PEG-Halt Status-2 Register:
569 * (LSW = mb[3], MSW = mb[7])
571 protocol_engine_id = (mb[2] & 0xff);
572 fw_err_code = (((mb[2] & 0xff00) >> 8) |
573 ((mb[6] & 0x1fff) << 8));
574 err_level = ((mb[6] & 0xe000) >> 13);
575 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
576 "Register: protocol_engine_id=0x%x "
577 "fw_err_code=0x%x err_level=0x%x.\n",
578 protocol_engine_id, fw_err_code, err_level);
579 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
580 "Register: 0x%x%x.\n", mb[7], mb[3]);
581 if (err_level == ERR_LEVEL_NON_FATAL) {
582 ql_log(ql_log_warn, vha, 0x5063,
583 "Not a fatal error, f/w has recovered itself.\n");
584 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
585 ql_log(ql_log_fatal, vha, 0x5064,
586 "Recoverable Fatal error: Chip reset "
588 qla83xx_schedule_work(vha,
589 QLA83XX_NIC_CORE_RESET);
590 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
591 ql_log(ql_log_fatal, vha, 0x5065,
592 "Unrecoverable Fatal error: Set FAILED "
593 "state, reboot required.\n");
594 qla83xx_schedule_work(vha,
595 QLA83XX_NIC_CORE_UNRECOVERABLE);
599 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
600 uint16_t peg_fw_state, nw_interface_link_up;
601 uint16_t nw_interface_signal_detect, sfp_status;
602 uint16_t htbt_counter, htbt_monitor_enable;
603 uint16_t sfp_additional_info, sfp_multirate;
604 uint16_t sfp_tx_fault, link_speed, dcbx_status;
607 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
608 * - PEG-to-FC Status Register:
609 * (LSW = mb[2], MSW = mb[6])
610 * Bits 0-7 = Peg-Firmware state
611 * Bit 8 = N/W Interface Link-up
612 * Bit 9 = N/W Interface signal detected
613 * Bits 10-11 = SFP Status
614 * SFP Status 0x0 = SFP+ transceiver not expected
615 * SFP Status 0x1 = SFP+ transceiver not present
616 * SFP Status 0x2 = SFP+ transceiver invalid
617 * SFP Status 0x3 = SFP+ transceiver present and
619 * Bits 12-14 = Heartbeat Counter
620 * Bit 15 = Heartbeat Monitor Enable
621 * Bits 16-17 = SFP Additional Info
622 * SFP info 0x0 = Unregocnized transceiver for
624 * SFP info 0x1 = SFP+ brand validation failed
625 * SFP info 0x2 = SFP+ speed validation failed
626 * SFP info 0x3 = SFP+ access error
627 * Bit 18 = SFP Multirate
628 * Bit 19 = SFP Tx Fault
629 * Bits 20-22 = Link Speed
630 * Bits 23-27 = Reserved
631 * Bits 28-30 = DCBX Status
632 * DCBX Status 0x0 = DCBX Disabled
633 * DCBX Status 0x1 = DCBX Enabled
634 * DCBX Status 0x2 = DCBX Exchange error
637 peg_fw_state = (mb[2] & 0x00ff);
638 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
639 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
640 sfp_status = ((mb[2] & 0x0c00) >> 10);
641 htbt_counter = ((mb[2] & 0x7000) >> 12);
642 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
643 sfp_additional_info = (mb[6] & 0x0003);
644 sfp_multirate = ((mb[6] & 0x0004) >> 2);
645 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
646 link_speed = ((mb[6] & 0x0070) >> 4);
647 dcbx_status = ((mb[6] & 0x7000) >> 12);
649 ql_log(ql_log_warn, vha, 0x5066,
650 "Peg-to-Fc Status Register:\n"
651 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
652 "nw_interface_signal_detect=0x%x"
653 "\nsfp_statis=0x%x.\n ", peg_fw_state,
654 nw_interface_link_up, nw_interface_signal_detect,
656 ql_log(ql_log_warn, vha, 0x5067,
657 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
658 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
659 htbt_counter, htbt_monitor_enable,
660 sfp_additional_info, sfp_multirate);
661 ql_log(ql_log_warn, vha, 0x5068,
662 "sfp_tx_fault=0x%x, link_state=0x%x, "
663 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
666 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
669 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
670 ql_log(ql_log_warn, vha, 0x5069,
671 "Heartbeat Failure encountered, chip reset "
674 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
678 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
679 ql_log(ql_log_info, vha, 0x506a,
680 "IDC Device-State changed = 0x%x.\n", mb[4]);
681 if (ha->flags.nic_core_reset_owner)
683 qla83xx_schedule_work(vha, MBA_IDC_AEN);
688 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
690 struct qla_hw_data *ha = vha->hw;
699 spin_lock_irqsave(&ha->vport_slock, flags);
700 list_for_each_entry(vp, &ha->vp_list, list) {
701 vp_did = vp->d_id.b24;
702 if (vp_did == rscn_entry) {
707 spin_unlock_irqrestore(&ha->vport_slock, flags);
713 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
718 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
719 if (f->loop_id == loop_id)
725 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
730 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
731 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
734 else if (f->deleted == 0)
742 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
748 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
749 if (f->d_id.b24 == id->b24) {
752 else if (f->deleted == 0)
759 /* Shall be called only on supported adapters. */
761 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
763 struct qla_hw_data *ha = vha->hw;
764 bool reset_isp_needed = 0;
766 ql_log(ql_log_warn, vha, 0x02f0,
767 "MPI Heartbeat stop. MPI reset is%s needed. "
768 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
769 mb[0] & BIT_8 ? "" : " not",
770 mb[0], mb[1], mb[2], mb[3]);
772 if ((mb[1] & BIT_8) == 0)
775 ql_log(ql_log_warn, vha, 0x02f1,
776 "MPI Heartbeat stop. FW dump needed\n");
778 if (ql2xfulldump_on_mpifail) {
779 ha->isp_ops->fw_dump(vha);
780 reset_isp_needed = 1;
783 ha->isp_ops->mpi_fw_dump(vha, 1);
785 if (reset_isp_needed) {
786 vha->hw->flags.fw_init_done = 0;
787 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
788 qla2xxx_wake_dpc(vha);
793 * qla2x00_async_event() - Process aynchronous events.
794 * @vha: SCSI driver HA context
795 * @rsp: response queue
796 * @mb: Mailbox registers (0 - 3)
799 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
804 struct qla_hw_data *ha = vha->hw;
805 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
806 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
807 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
808 uint32_t rscn_entry, host_pid;
810 fc_port_t *fcport = NULL;
812 if (!vha->hw->flags.fw_started)
815 /* Setup to process RIO completion. */
817 if (IS_CNA_CAPABLE(ha))
820 case MBA_SCSI_COMPLETION:
821 handles[0] = make_handle(mb[2], mb[1]);
824 case MBA_CMPLT_1_16BIT:
827 mb[0] = MBA_SCSI_COMPLETION;
829 case MBA_CMPLT_2_16BIT:
833 mb[0] = MBA_SCSI_COMPLETION;
835 case MBA_CMPLT_3_16BIT:
840 mb[0] = MBA_SCSI_COMPLETION;
842 case MBA_CMPLT_4_16BIT:
846 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
848 mb[0] = MBA_SCSI_COMPLETION;
850 case MBA_CMPLT_5_16BIT:
854 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
855 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
857 mb[0] = MBA_SCSI_COMPLETION;
859 case MBA_CMPLT_2_32BIT:
860 handles[0] = make_handle(mb[2], mb[1]);
861 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
862 RD_MAILBOX_REG(ha, reg, 6));
864 mb[0] = MBA_SCSI_COMPLETION;
871 case MBA_SCSI_COMPLETION: /* Fast Post */
872 if (!vha->flags.online)
875 for (cnt = 0; cnt < handle_cnt; cnt++)
876 qla2x00_process_completed_request(vha, rsp->req,
880 case MBA_RESET: /* Reset */
881 ql_dbg(ql_dbg_async, vha, 0x5002,
882 "Asynchronous RESET.\n");
884 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
887 case MBA_SYSTEM_ERR: /* System Error */
889 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
890 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
893 m[0] = rd_reg_word(®24->mailbox4);
894 m[1] = rd_reg_word(®24->mailbox5);
895 m[2] = rd_reg_word(®24->mailbox6);
896 mbx = m[3] = rd_reg_word(®24->mailbox7);
898 ql_log(ql_log_warn, vha, 0x5003,
899 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
900 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
902 ql_log(ql_log_warn, vha, 0x5003,
903 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
904 mb[1], mb[2], mb[3]);
906 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
907 rd_reg_word(®24->mailbox7) & BIT_8)
908 ha->isp_ops->mpi_fw_dump(vha, 1);
909 ha->isp_ops->fw_dump(vha);
910 ha->flags.fw_init_done = 0;
913 if (IS_FWI2_CAPABLE(ha)) {
914 if (mb[1] == 0 && mb[2] == 0) {
915 ql_log(ql_log_fatal, vha, 0x5004,
916 "Unrecoverable Hardware Error: adapter "
917 "marked OFFLINE!\n");
918 vha->flags.online = 0;
919 vha->device_flags |= DFLG_DEV_FAILED;
921 /* Check to see if MPI timeout occurred */
922 if ((mbx & MBX_3) && (ha->port_no == 0))
923 set_bit(MPI_RESET_NEEDED,
926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
928 } else if (mb[1] == 0) {
929 ql_log(ql_log_fatal, vha, 0x5005,
930 "Unrecoverable Hardware Error: adapter marked "
932 vha->flags.online = 0;
933 vha->device_flags |= DFLG_DEV_FAILED;
935 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
938 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
939 ql_log(ql_log_warn, vha, 0x5006,
940 "ISP Request Transfer Error (%x).\n", mb[1]);
942 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
945 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
946 ql_log(ql_log_warn, vha, 0x5007,
947 "ISP Response Transfer Error (%x).\n", mb[1]);
949 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
952 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
953 ql_dbg(ql_dbg_async, vha, 0x5008,
954 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
957 case MBA_LOOP_INIT_ERR:
958 ql_log(ql_log_warn, vha, 0x5090,
959 "LOOP INIT ERROR (%x).\n", mb[1]);
960 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
963 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
964 ha->flags.lip_ae = 1;
966 ql_dbg(ql_dbg_async, vha, 0x5009,
967 "LIP occurred (%x).\n", mb[1]);
969 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
970 atomic_set(&vha->loop_state, LOOP_DOWN);
971 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
972 qla2x00_mark_all_devices_lost(vha);
976 atomic_set(&vha->vp_state, VP_FAILED);
977 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
980 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
981 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
983 vha->flags.management_server_logged_in = 0;
984 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
987 case MBA_LOOP_UP: /* Loop Up Event */
988 if (IS_QLA2100(ha) || IS_QLA2200(ha))
989 ha->link_data_rate = PORT_SPEED_1GB;
991 ha->link_data_rate = mb[1];
993 ql_log(ql_log_info, vha, 0x500a,
994 "LOOP UP detected (%s Gbps).\n",
995 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
997 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
999 ql_log(ql_log_info, vha, 0x11a0,
1000 "FEC=enabled (link up).\n");
1003 vha->flags.management_server_logged_in = 0;
1004 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1008 case MBA_LOOP_DOWN: /* Loop Down Event */
1010 ha->flags.lip_ae = 0;
1011 ha->current_topology = 0;
1013 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1014 ? rd_reg_word(®24->mailbox4) : 0;
1015 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4])
1017 ql_log(ql_log_info, vha, 0x500b,
1018 "LOOP DOWN detected (%x %x %x %x).\n",
1019 mb[1], mb[2], mb[3], mbx);
1021 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1022 atomic_set(&vha->loop_state, LOOP_DOWN);
1023 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1025 * In case of loop down, restore WWPN from
1026 * NVRAM in case of FA-WWPN capable ISP
1027 * Restore for Physical Port only
1030 if (ha->flags.fawwpn_enabled &&
1031 (ha->current_topology == ISP_CFG_F)) {
1032 void *wwpn = ha->init_cb->port_name;
1034 memcpy(vha->port_name, wwpn, WWN_SIZE);
1035 fc_host_port_name(vha->host) =
1036 wwn_to_u64(vha->port_name);
1037 ql_dbg(ql_dbg_init + ql_dbg_verbose,
1038 vha, 0x00d8, "LOOP DOWN detected,"
1039 "restore WWPN %016llx\n",
1040 wwn_to_u64(vha->port_name));
1043 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1046 vha->device_flags |= DFLG_NO_CABLE;
1047 qla2x00_mark_all_devices_lost(vha);
1051 atomic_set(&vha->vp_state, VP_FAILED);
1052 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1055 vha->flags.management_server_logged_in = 0;
1056 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1057 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1060 case MBA_LIP_RESET: /* LIP reset occurred */
1061 ql_dbg(ql_dbg_async, vha, 0x500c,
1062 "LIP reset occurred (%x).\n", mb[1]);
1064 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1065 atomic_set(&vha->loop_state, LOOP_DOWN);
1066 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1067 qla2x00_mark_all_devices_lost(vha);
1071 atomic_set(&vha->vp_state, VP_FAILED);
1072 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1075 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1077 ha->operating_mode = LOOP;
1078 vha->flags.management_server_logged_in = 0;
1079 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1082 /* case MBA_DCBX_COMPLETE: */
1083 case MBA_POINT_TO_POINT: /* Point-to-Point */
1084 ha->flags.lip_ae = 0;
1089 if (IS_CNA_CAPABLE(ha)) {
1090 ql_dbg(ql_dbg_async, vha, 0x500d,
1091 "DCBX Completed -- %04x %04x %04x.\n",
1092 mb[1], mb[2], mb[3]);
1093 if (ha->notify_dcbx_comp && !vha->vp_idx)
1094 complete(&ha->dcbx_comp);
1097 ql_dbg(ql_dbg_async, vha, 0x500e,
1098 "Asynchronous P2P MODE received.\n");
1101 * Until there's a transition from loop down to loop up, treat
1102 * this as loop down only.
1104 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1105 atomic_set(&vha->loop_state, LOOP_DOWN);
1106 if (!atomic_read(&vha->loop_down_timer))
1107 atomic_set(&vha->loop_down_timer,
1110 qla2x00_mark_all_devices_lost(vha);
1114 atomic_set(&vha->vp_state, VP_FAILED);
1115 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1118 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1119 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1121 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1122 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1124 vha->flags.management_server_logged_in = 0;
1127 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
1131 ql_dbg(ql_dbg_async, vha, 0x500f,
1132 "Configuration change detected: value=%x.\n", mb[1]);
1134 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1135 atomic_set(&vha->loop_state, LOOP_DOWN);
1136 if (!atomic_read(&vha->loop_down_timer))
1137 atomic_set(&vha->loop_down_timer,
1139 qla2x00_mark_all_devices_lost(vha);
1143 atomic_set(&vha->vp_state, VP_FAILED);
1144 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1147 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1148 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1151 case MBA_PORT_UPDATE: /* Port database update */
1153 * Handle only global and vn-port update events
1156 * mb[1] = N_Port handle of changed port
1157 * OR 0xffff for global event
1158 * mb[2] = New login state
1159 * 7 = Port logged out
1160 * mb[3] = LSB is vp_idx, 0xff = all vps
1162 * Skip processing if:
1163 * Event is global, vp_idx is NOT all vps,
1164 * vp_idx does not match
1165 * Event is not global, vp_idx does not match
1167 if (IS_QLA2XXX_MIDTYPE(ha) &&
1168 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1169 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1173 ql_dbg(ql_dbg_async, vha, 0x5010,
1174 "Port %s %04x %04x %04x.\n",
1175 mb[1] == 0xffff ? "unavailable" : "logout",
1176 mb[1], mb[2], mb[3]);
1178 if (mb[1] == 0xffff)
1179 goto global_port_update;
1181 if (mb[1] == NPH_SNS_LID(ha)) {
1182 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1183 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1187 /* use handle_cnt for loop id/nport handle */
1188 if (IS_FWI2_CAPABLE(ha))
1189 handle_cnt = NPH_SNS;
1191 handle_cnt = SIMPLE_NAME_SERVER;
1192 if (mb[1] == handle_cnt) {
1193 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1194 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1199 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1202 if (atomic_read(&fcport->state) != FCS_ONLINE)
1204 ql_dbg(ql_dbg_async, vha, 0x508a,
1205 "Marking port lost loopid=%04x portid=%06x.\n",
1206 fcport->loop_id, fcport->d_id.b24);
1207 if (qla_ini_mode_enabled(vha)) {
1208 fcport->logout_on_delete = 0;
1209 qlt_schedule_sess_for_deletion(fcport);
1214 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1215 atomic_set(&vha->loop_state, LOOP_DOWN);
1216 atomic_set(&vha->loop_down_timer,
1218 vha->device_flags |= DFLG_NO_CABLE;
1219 qla2x00_mark_all_devices_lost(vha);
1223 atomic_set(&vha->vp_state, VP_FAILED);
1224 fc_vport_set_state(vha->fc_vport,
1226 qla2x00_mark_all_devices_lost(vha);
1229 vha->flags.management_server_logged_in = 0;
1230 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1235 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1236 * event etc. earlier indicating loop is down) then process
1237 * it. Otherwise ignore it and Wait for RSCN to come in.
1239 atomic_set(&vha->loop_down_timer, 0);
1240 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1241 !ha->flags.n2n_ae &&
1242 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1243 ql_dbg(ql_dbg_async, vha, 0x5011,
1244 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1245 mb[1], mb[2], mb[3]);
1249 ql_dbg(ql_dbg_async, vha, 0x5012,
1250 "Port database changed %04x %04x %04x.\n",
1251 mb[1], mb[2], mb[3]);
1254 * Mark all devices as missing so we will login again.
1256 atomic_set(&vha->loop_state, LOOP_UP);
1257 vha->scan.scan_retry = 0;
1259 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1260 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1261 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1264 case MBA_RSCN_UPDATE: /* State Change Registration */
1265 /* Check if the Vport has issued a SCR */
1266 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1268 /* Only handle SCNs for our Vport index. */
1269 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1272 ql_dbg(ql_dbg_async, vha, 0x5013,
1273 "RSCN database changed -- %04x %04x %04x.\n",
1274 mb[1], mb[2], mb[3]);
1276 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1277 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1278 | vha->d_id.b.al_pa;
1279 if (rscn_entry == host_pid) {
1280 ql_dbg(ql_dbg_async, vha, 0x5014,
1281 "Ignoring RSCN update to local host "
1282 "port ID (%06x).\n", host_pid);
1286 /* Ignore reserved bits from RSCN-payload. */
1287 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1289 /* Skip RSCNs for virtual ports on the same physical port */
1290 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1293 atomic_set(&vha->loop_down_timer, 0);
1294 vha->flags.management_server_logged_in = 0;
1296 struct event_arg ea;
1298 memset(&ea, 0, sizeof(ea));
1299 ea.id.b24 = rscn_entry;
1300 ea.id.b.rsvd_1 = rscn_entry >> 24;
1301 qla2x00_handle_rscn(vha, &ea);
1302 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1305 /* case MBA_RIO_RESPONSE: */
1306 case MBA_ZIO_RESPONSE:
1307 ql_dbg(ql_dbg_async, vha, 0x5015,
1308 "[R|Z]IO update completion.\n");
1310 if (IS_FWI2_CAPABLE(ha))
1311 qla24xx_process_response_queue(vha, rsp);
1313 qla2x00_process_response_queue(rsp);
1316 case MBA_DISCARD_RND_FRAME:
1317 ql_dbg(ql_dbg_async, vha, 0x5016,
1318 "Discard RND Frame -- %04x %04x %04x.\n",
1319 mb[1], mb[2], mb[3]);
1322 case MBA_TRACE_NOTIFICATION:
1323 ql_dbg(ql_dbg_async, vha, 0x5017,
1324 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1327 case MBA_ISP84XX_ALERT:
1328 ql_dbg(ql_dbg_async, vha, 0x5018,
1329 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1330 mb[1], mb[2], mb[3]);
1332 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1334 case A84_PANIC_RECOVERY:
1335 ql_log(ql_log_info, vha, 0x5019,
1336 "Alert 84XX: panic recovery %04x %04x.\n",
1339 case A84_OP_LOGIN_COMPLETE:
1340 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1341 ql_log(ql_log_info, vha, 0x501a,
1342 "Alert 84XX: firmware version %x.\n",
1343 ha->cs84xx->op_fw_version);
1345 case A84_DIAG_LOGIN_COMPLETE:
1346 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1347 ql_log(ql_log_info, vha, 0x501b,
1348 "Alert 84XX: diagnostic firmware version %x.\n",
1349 ha->cs84xx->diag_fw_version);
1351 case A84_GOLD_LOGIN_COMPLETE:
1352 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1353 ha->cs84xx->fw_update = 1;
1354 ql_log(ql_log_info, vha, 0x501c,
1355 "Alert 84XX: gold firmware version %x.\n",
1356 ha->cs84xx->gold_fw_version);
1359 ql_log(ql_log_warn, vha, 0x501d,
1360 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1361 mb[1], mb[2], mb[3]);
1363 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1365 case MBA_DCBX_START:
1366 ql_dbg(ql_dbg_async, vha, 0x501e,
1367 "DCBX Started -- %04x %04x %04x.\n",
1368 mb[1], mb[2], mb[3]);
1370 case MBA_DCBX_PARAM_UPDATE:
1371 ql_dbg(ql_dbg_async, vha, 0x501f,
1372 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1373 mb[1], mb[2], mb[3]);
1375 case MBA_FCF_CONF_ERR:
1376 ql_dbg(ql_dbg_async, vha, 0x5020,
1377 "FCF Configuration Error -- %04x %04x %04x.\n",
1378 mb[1], mb[2], mb[3]);
1380 case MBA_IDC_NOTIFY:
1381 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1382 mb[4] = rd_reg_word(®24->mailbox4);
1383 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1384 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1385 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1386 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1388 * Extend loop down timer since port is active.
1390 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1391 atomic_set(&vha->loop_down_timer,
1393 qla2xxx_wake_dpc(vha);
1397 case MBA_IDC_COMPLETE:
1398 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1399 complete(&ha->lb_portup_comp);
1401 case MBA_IDC_TIME_EXT:
1402 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1404 qla81xx_idc_event(vha, mb[0], mb[1]);
1408 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1409 qla27xx_handle_8200_aen(vha, mb);
1410 } else if (IS_QLA83XX(ha)) {
1411 mb[4] = rd_reg_word(®24->mailbox4);
1412 mb[5] = rd_reg_word(®24->mailbox5);
1413 mb[6] = rd_reg_word(®24->mailbox6);
1414 mb[7] = rd_reg_word(®24->mailbox7);
1415 qla83xx_handle_8200_aen(vha, mb);
1417 ql_dbg(ql_dbg_async, vha, 0x5052,
1418 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1419 mb[0], mb[1], mb[2], mb[3]);
1423 case MBA_DPORT_DIAGNOSTICS:
1424 ql_dbg(ql_dbg_async, vha, 0x5052,
1425 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1426 mb[0], mb[1], mb[2], mb[3]);
1427 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1428 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1429 static char *results[] = {
1430 "start", "done(pass)", "done(error)", "undefined" };
1431 static char *types[] = {
1432 "none", "dynamic", "static", "other" };
1433 uint result = mb[1] >> 0 & 0x3;
1434 uint type = mb[1] >> 6 & 0x3;
1435 uint sw = mb[1] >> 15 & 0x1;
1436 ql_dbg(ql_dbg_async, vha, 0x5052,
1437 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1438 results[result], types[type], sw);
1440 static char *reasons[] = {
1441 "reserved", "unexpected reject",
1442 "unexpected phase", "retry exceeded",
1443 "timed out", "not supported",
1445 uint reason = mb[2] >> 0 & 0xf;
1446 uint phase = mb[2] >> 12 & 0xf;
1447 ql_dbg(ql_dbg_async, vha, 0x5052,
1448 "D-Port Diagnostics: reason=%s phase=%u \n",
1449 reason < 7 ? reasons[reason] : "other",
1455 case MBA_TEMPERATURE_ALERT:
1456 ql_dbg(ql_dbg_async, vha, 0x505e,
1457 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1459 schedule_work(&ha->board_disable);
1462 case MBA_TRANS_INSERT:
1463 ql_dbg(ql_dbg_async, vha, 0x5091,
1464 "Transceiver Insertion: %04x\n", mb[1]);
1465 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1468 case MBA_TRANS_REMOVE:
1469 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1473 ql_dbg(ql_dbg_async, vha, 0x5057,
1474 "Unknown AEN:%04x %04x %04x %04x\n",
1475 mb[0], mb[1], mb[2], mb[3]);
1478 qlt_async_event(mb[0], vha, mb);
1480 if (!vha->vp_idx && ha->num_vhosts)
1481 qla2x00_alert_all_vps(rsp, mb);
1485 * qla2x00_process_completed_request() - Process a Fast Post response.
1486 * @vha: SCSI driver HA context
1487 * @req: request queue
1491 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1492 struct req_que *req, uint32_t index)
1495 struct qla_hw_data *ha = vha->hw;
1497 /* Validate handle. */
1498 if (index >= req->num_outstanding_cmds) {
1499 ql_log(ql_log_warn, vha, 0x3014,
1500 "Invalid SCSI command index (%x).\n", index);
1502 if (IS_P3P_TYPE(ha))
1503 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1505 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1509 sp = req->outstanding_cmds[index];
1511 /* Free outstanding command slot. */
1512 req->outstanding_cmds[index] = NULL;
1514 /* Save ISP completion status */
1515 sp->done(sp, DID_OK << 16);
1517 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1519 if (IS_P3P_TYPE(ha))
1520 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1522 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1527 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1528 struct req_que *req, void *iocb)
1530 struct qla_hw_data *ha = vha->hw;
1531 sts_entry_t *pkt = iocb;
1535 index = LSW(pkt->handle);
1536 if (index >= req->num_outstanding_cmds) {
1537 ql_log(ql_log_warn, vha, 0x5031,
1538 "Invalid command index (%x) type %8ph.\n",
1540 if (IS_P3P_TYPE(ha))
1541 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1543 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1546 sp = req->outstanding_cmds[index];
1548 ql_log(ql_log_warn, vha, 0x5032,
1549 "Invalid completion handle (%x) -- timed-out.\n", index);
1552 if (sp->handle != index) {
1553 ql_log(ql_log_warn, vha, 0x5033,
1554 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1558 req->outstanding_cmds[index] = NULL;
1565 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1566 struct mbx_entry *mbx)
1568 const char func[] = "MBX-IOCB";
1572 struct srb_iocb *lio;
1576 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1580 lio = &sp->u.iocb_cmd;
1582 fcport = sp->fcport;
1583 data = lio->u.logio.data;
1585 data[0] = MBS_COMMAND_ERROR;
1586 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1587 QLA_LOGIO_LOGIN_RETRIED : 0;
1588 if (mbx->entry_status) {
1589 ql_dbg(ql_dbg_async, vha, 0x5043,
1590 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1591 "entry-status=%x status=%x state-flag=%x "
1592 "status-flags=%x.\n", type, sp->handle,
1593 fcport->d_id.b.domain, fcport->d_id.b.area,
1594 fcport->d_id.b.al_pa, mbx->entry_status,
1595 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1596 le16_to_cpu(mbx->status_flags));
1598 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1604 status = le16_to_cpu(mbx->status);
1605 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1606 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1608 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1609 ql_dbg(ql_dbg_async, vha, 0x5045,
1610 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1611 type, sp->handle, fcport->d_id.b.domain,
1612 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1613 le16_to_cpu(mbx->mb1));
1615 data[0] = MBS_COMMAND_COMPLETE;
1616 if (sp->type == SRB_LOGIN_CMD) {
1617 fcport->port_type = FCT_TARGET;
1618 if (le16_to_cpu(mbx->mb1) & BIT_0)
1619 fcport->port_type = FCT_INITIATOR;
1620 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1621 fcport->flags |= FCF_FCP2_DEVICE;
1626 data[0] = le16_to_cpu(mbx->mb0);
1628 case MBS_PORT_ID_USED:
1629 data[1] = le16_to_cpu(mbx->mb1);
1631 case MBS_LOOP_ID_USED:
1634 data[0] = MBS_COMMAND_ERROR;
1638 ql_log(ql_log_warn, vha, 0x5046,
1639 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1640 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1641 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1642 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1643 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1644 le16_to_cpu(mbx->mb7));
1651 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1652 struct mbx_24xx_entry *pkt)
1654 const char func[] = "MBX-IOCB2";
1656 struct srb_iocb *si;
1660 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1664 si = &sp->u.iocb_cmd;
1665 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
1667 for (i = 0; i < sz; i++)
1668 si->u.mbx.in_mb[i] = pkt->mb[i];
1670 res = (si->u.mbx.in_mb[0] & MBS_MASK);
1676 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1677 struct nack_to_isp *pkt)
1679 const char func[] = "nack";
1683 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1687 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
1688 res = QLA_FUNCTION_FAILED;
1694 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1695 sts_entry_t *pkt, int iocb_type)
1697 const char func[] = "CT_IOCB";
1700 struct bsg_job *bsg_job;
1701 struct fc_bsg_reply *bsg_reply;
1702 uint16_t comp_status;
1705 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1711 bsg_job = sp->u.bsg_job;
1712 bsg_reply = bsg_job->reply;
1714 type = "ct pass-through";
1716 comp_status = le16_to_cpu(pkt->comp_status);
1719 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1720 * fc payload to the caller
1722 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1723 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1725 if (comp_status != CS_COMPLETE) {
1726 if (comp_status == CS_DATA_UNDERRUN) {
1728 bsg_reply->reply_payload_rcv_len =
1729 le16_to_cpu(pkt->rsp_info_len);
1731 ql_log(ql_log_warn, vha, 0x5048,
1732 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
1734 bsg_reply->reply_payload_rcv_len);
1736 ql_log(ql_log_warn, vha, 0x5049,
1737 "CT pass-through-%s error comp_status=0x%x.\n",
1739 res = DID_ERROR << 16;
1740 bsg_reply->reply_payload_rcv_len = 0;
1742 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1746 bsg_reply->reply_payload_rcv_len =
1747 bsg_job->reply_payload.payload_len;
1748 bsg_job->reply_len = 0;
1751 case SRB_CT_PTHRU_CMD:
1753 * borrowing sts_entry_24xx.comp_status.
1754 * same location as ct_entry_24xx.comp_status
1756 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
1757 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1766 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1767 struct sts_entry_24xx *pkt, int iocb_type)
1769 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
1770 const char func[] = "ELS_CT_IOCB";
1773 struct bsg_job *bsg_job;
1774 struct fc_bsg_reply *bsg_reply;
1775 uint16_t comp_status;
1776 uint32_t fw_status[3];
1778 struct srb_iocb *els;
1780 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1786 case SRB_ELS_CMD_RPT:
1787 case SRB_ELS_CMD_HST:
1791 type = "ct pass-through";
1794 type = "Driver ELS logo";
1795 if (iocb_type != ELS_IOCB_TYPE) {
1796 ql_dbg(ql_dbg_user, vha, 0x5047,
1797 "Completing %s: (%p) type=%d.\n",
1798 type, sp, sp->type);
1803 case SRB_CT_PTHRU_CMD:
1804 /* borrowing sts_entry_24xx.comp_status.
1805 same location as ct_entry_24xx.comp_status
1807 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
1808 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1813 ql_dbg(ql_dbg_user, vha, 0x503e,
1814 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1818 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1819 fw_status[1] = le32_to_cpu(ese->error_subcode_1);
1820 fw_status[2] = le32_to_cpu(ese->error_subcode_2);
1822 if (iocb_type == ELS_IOCB_TYPE) {
1823 els = &sp->u.iocb_cmd;
1824 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
1825 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
1826 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
1827 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
1828 if (comp_status == CS_COMPLETE) {
1831 if (comp_status == CS_DATA_UNDERRUN) {
1833 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
1834 ese->total_byte_count));
1836 els->u.els_plogi.len = 0;
1837 res = DID_ERROR << 16;
1840 ql_dbg(ql_dbg_user, vha, 0x503f,
1841 "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
1842 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1843 le32_to_cpu(ese->total_byte_count));
1847 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1848 * fc payload to the caller
1850 bsg_job = sp->u.bsg_job;
1851 bsg_reply = bsg_job->reply;
1852 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1853 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1855 if (comp_status != CS_COMPLETE) {
1856 if (comp_status == CS_DATA_UNDERRUN) {
1858 bsg_reply->reply_payload_rcv_len =
1859 le32_to_cpu(ese->total_byte_count);
1861 ql_dbg(ql_dbg_user, vha, 0x503f,
1862 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1863 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1864 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1865 le32_to_cpu(ese->total_byte_count));
1867 ql_dbg(ql_dbg_user, vha, 0x5040,
1868 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1869 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1870 type, sp->handle, comp_status,
1871 le32_to_cpu(ese->error_subcode_1),
1872 le32_to_cpu(ese->error_subcode_2));
1873 res = DID_ERROR << 16;
1874 bsg_reply->reply_payload_rcv_len = 0;
1876 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
1877 fw_status, sizeof(fw_status));
1878 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1883 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1884 bsg_job->reply_len = 0;
1892 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1893 struct logio_entry_24xx *logio)
1895 const char func[] = "LOGIO-IOCB";
1899 struct srb_iocb *lio;
1903 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1907 lio = &sp->u.iocb_cmd;
1909 fcport = sp->fcport;
1910 data = lio->u.logio.data;
1912 data[0] = MBS_COMMAND_ERROR;
1913 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1914 QLA_LOGIO_LOGIN_RETRIED : 0;
1915 if (logio->entry_status) {
1916 ql_log(ql_log_warn, fcport->vha, 0x5034,
1917 "Async-%s error entry - %8phC hdl=%x"
1918 "portid=%02x%02x%02x entry-status=%x.\n",
1919 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
1920 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1921 logio->entry_status);
1922 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1923 logio, sizeof(*logio));
1928 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1929 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
1930 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
1931 type, sp->handle, fcport->d_id.b24, fcport->port_name,
1932 le32_to_cpu(logio->io_parameter[0]));
1934 vha->hw->exch_starvation = 0;
1935 data[0] = MBS_COMMAND_COMPLETE;
1937 if (sp->type == SRB_PRLI_CMD) {
1938 lio->u.logio.iop[0] =
1939 le32_to_cpu(logio->io_parameter[0]);
1940 lio->u.logio.iop[1] =
1941 le32_to_cpu(logio->io_parameter[1]);
1945 if (sp->type != SRB_LOGIN_CMD)
1948 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1949 if (iop[0] & BIT_4) {
1950 fcport->port_type = FCT_TARGET;
1952 fcport->flags |= FCF_FCP2_DEVICE;
1953 } else if (iop[0] & BIT_5)
1954 fcport->port_type = FCT_INITIATOR;
1957 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1959 if (logio->io_parameter[7] || logio->io_parameter[8])
1960 fcport->supported_classes |= FC_COS_CLASS2;
1961 if (logio->io_parameter[9] || logio->io_parameter[10])
1962 fcport->supported_classes |= FC_COS_CLASS3;
1967 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1968 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1969 lio->u.logio.iop[0] = iop[0];
1970 lio->u.logio.iop[1] = iop[1];
1972 case LSC_SCODE_PORTID_USED:
1973 data[0] = MBS_PORT_ID_USED;
1974 data[1] = LSW(iop[1]);
1976 case LSC_SCODE_NPORT_USED:
1977 data[0] = MBS_LOOP_ID_USED;
1979 case LSC_SCODE_CMD_FAILED:
1980 if (iop[1] == 0x0606) {
1982 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
1983 * Target side acked.
1985 data[0] = MBS_COMMAND_COMPLETE;
1988 data[0] = MBS_COMMAND_ERROR;
1990 case LSC_SCODE_NOXCB:
1991 vha->hw->exch_starvation++;
1992 if (vha->hw->exch_starvation > 5) {
1993 ql_log(ql_log_warn, vha, 0xd046,
1994 "Exchange starvation. Resetting RISC\n");
1996 vha->hw->exch_starvation = 0;
1998 if (IS_P3P_TYPE(vha->hw))
1999 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2001 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2002 qla2xxx_wake_dpc(vha);
2006 data[0] = MBS_COMMAND_ERROR;
2010 ql_dbg(ql_dbg_async, sp->vha, 0x5037,
2011 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2012 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2013 le16_to_cpu(logio->comp_status),
2014 le32_to_cpu(logio->io_parameter[0]),
2015 le32_to_cpu(logio->io_parameter[1]));
2022 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2024 const char func[] = "TMF-IOCB";
2028 struct srb_iocb *iocb;
2029 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2031 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2035 iocb = &sp->u.iocb_cmd;
2037 fcport = sp->fcport;
2038 iocb->u.tmf.data = QLA_SUCCESS;
2040 if (sts->entry_status) {
2041 ql_log(ql_log_warn, fcport->vha, 0x5038,
2042 "Async-%s error - hdl=%x entry-status(%x).\n",
2043 type, sp->handle, sts->entry_status);
2044 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2045 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2046 ql_log(ql_log_warn, fcport->vha, 0x5039,
2047 "Async-%s error - hdl=%x completion status(%x).\n",
2048 type, sp->handle, sts->comp_status);
2049 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2050 } else if ((le16_to_cpu(sts->scsi_status) &
2051 SS_RESPONSE_INFO_LEN_VALID)) {
2052 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2053 ql_log(ql_log_warn, fcport->vha, 0x503b,
2054 "Async-%s error - hdl=%x not enough response(%d).\n",
2055 type, sp->handle, sts->rsp_data_len);
2056 } else if (sts->data[3]) {
2057 ql_log(ql_log_warn, fcport->vha, 0x503c,
2058 "Async-%s error - hdl=%x response(%x).\n",
2059 type, sp->handle, sts->data[3]);
2060 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2064 if (iocb->u.tmf.data != QLA_SUCCESS)
2065 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2071 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2072 void *tsk, srb_t *sp)
2075 struct srb_iocb *iocb;
2076 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2077 uint16_t state_flags;
2078 struct nvmefc_fcp_req *fd;
2079 uint16_t ret = QLA_SUCCESS;
2080 __le16 comp_status = sts->comp_status;
2083 iocb = &sp->u.iocb_cmd;
2084 fcport = sp->fcport;
2085 iocb->u.nvme.comp_status = comp_status;
2086 state_flags = le16_to_cpu(sts->state_flags);
2087 fd = iocb->u.nvme.desc;
2089 if (unlikely(iocb->u.nvme.aen_op))
2090 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2092 if (unlikely(comp_status != CS_COMPLETE))
2095 fd->transferred_length = fd->payload_length -
2096 le32_to_cpu(sts->residual_len);
2099 * State flags: Bit 6 and 0.
2100 * If 0 is set, we don't care about 6.
2101 * both cases resp was dma'd to host buffer
2102 * if both are 0, that is good path case.
2103 * if six is set and 0 is clear, we need to
2104 * copy resp data from status iocb to resp buffer.
2106 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2107 iocb->u.nvme.rsp_pyld_len = 0;
2108 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2109 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2110 /* Response already DMA'd to fd->rspaddr. */
2111 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2112 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2114 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2117 iocb->u.nvme.rsp_pyld_len = 0;
2118 fd->transferred_length = 0;
2119 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2120 "Unexpected values in NVMe_RSP IU.\n");
2122 } else if (state_flags & SF_NVME_ERSP) {
2123 uint32_t *inbuf, *outbuf;
2126 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2127 outbuf = (uint32_t *)fd->rspaddr;
2128 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2129 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2130 sizeof(struct nvme_fc_ersp_iu))) {
2131 if (ql_mask_match(ql_dbg_io)) {
2132 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2133 iocb->u.nvme.rsp_pyld_len);
2134 ql_log(ql_log_warn, fcport->vha, 0x5100,
2135 "Unexpected response payload length %u.\n",
2136 iocb->u.nvme.rsp_pyld_len);
2138 iocb->u.nvme.rsp_pyld_len =
2139 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2141 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2142 for (; iter; iter--)
2143 *outbuf++ = swab32(*inbuf++);
2146 if (state_flags & SF_NVME_ERSP) {
2147 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2150 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2151 if (fd->transferred_length != tgt_xfer_len) {
2152 ql_dbg(ql_dbg_io, fcport->vha, 0x3079,
2153 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2154 tgt_xfer_len, fd->transferred_length);
2156 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2158 * Do not log if this is just an underflow and there
2165 if (unlikely(logit))
2166 ql_log(ql_log_warn, fcport->vha, 0x5060,
2167 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2168 sp->name, sp->handle, comp_status,
2169 fd->transferred_length, le32_to_cpu(sts->residual_len),
2173 * If transport error then Failure (HBA rejects request)
2174 * otherwise transport will handle.
2176 switch (le16_to_cpu(comp_status)) {
2181 case CS_PORT_UNAVAILABLE:
2182 case CS_PORT_LOGGED_OUT:
2183 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2187 fd->transferred_length = 0;
2188 iocb->u.nvme.rsp_pyld_len = 0;
2191 case CS_DATA_UNDERRUN:
2194 ret = QLA_FUNCTION_FAILED;
2200 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2201 struct vp_ctrl_entry_24xx *vce)
2203 const char func[] = "CTRLVP-IOCB";
2205 int rval = QLA_SUCCESS;
2207 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2211 if (vce->entry_status != 0) {
2212 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2213 "%s: Failed to complete IOCB -- error status (%x)\n",
2214 sp->name, vce->entry_status);
2215 rval = QLA_FUNCTION_FAILED;
2216 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2217 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2218 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2219 sp->name, le16_to_cpu(vce->comp_status),
2220 le16_to_cpu(vce->vp_idx_failed));
2221 rval = QLA_FUNCTION_FAILED;
2223 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2224 "Done %s.\n", __func__);
2231 /* Process a single response queue entry. */
2232 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2233 struct rsp_que *rsp,
2236 sts21_entry_t *sts21_entry;
2237 sts22_entry_t *sts22_entry;
2238 uint16_t handle_cnt;
2241 switch (pkt->entry_type) {
2243 qla2x00_status_entry(vha, rsp, pkt);
2245 case STATUS_TYPE_21:
2246 sts21_entry = (sts21_entry_t *)pkt;
2247 handle_cnt = sts21_entry->handle_count;
2248 for (cnt = 0; cnt < handle_cnt; cnt++)
2249 qla2x00_process_completed_request(vha, rsp->req,
2250 sts21_entry->handle[cnt]);
2252 case STATUS_TYPE_22:
2253 sts22_entry = (sts22_entry_t *)pkt;
2254 handle_cnt = sts22_entry->handle_count;
2255 for (cnt = 0; cnt < handle_cnt; cnt++)
2256 qla2x00_process_completed_request(vha, rsp->req,
2257 sts22_entry->handle[cnt]);
2259 case STATUS_CONT_TYPE:
2260 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2263 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2266 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2269 /* Type Not Supported. */
2270 ql_log(ql_log_warn, vha, 0x504a,
2271 "Received unknown response pkt type %x entry status=%x.\n",
2272 pkt->entry_type, pkt->entry_status);
2278 * qla2x00_process_response_queue() - Process response queue entries.
2279 * @rsp: response queue
2282 qla2x00_process_response_queue(struct rsp_que *rsp)
2284 struct scsi_qla_host *vha;
2285 struct qla_hw_data *ha = rsp->hw;
2286 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2289 vha = pci_get_drvdata(ha->pdev);
2291 if (!vha->flags.online)
2294 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2295 pkt = (sts_entry_t *)rsp->ring_ptr;
2298 if (rsp->ring_index == rsp->length) {
2299 rsp->ring_index = 0;
2300 rsp->ring_ptr = rsp->ring;
2305 if (pkt->entry_status != 0) {
2306 qla2x00_error_entry(vha, rsp, pkt);
2307 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2312 qla2x00_process_response_entry(vha, rsp, pkt);
2313 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2317 /* Adjust ring index */
2318 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
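/*
 * Sketch of the ring discipline used above: the firmware (producer)
 * posts entries and the driver (consumer) walks the ring until it
 * hits a signature of RESPONSE_PROCESSED, stamping each consumed
 * entry and wrapping ring_index to 0 at rsp->length.  Writing
 * ring_index to ISP_RSP_Q_OUT then tells the firmware how far the
 * driver has consumed.
 */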
2322 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2323 uint32_t sense_len, struct rsp_que *rsp, int res)
2325 struct scsi_qla_host *vha = sp->vha;
2326 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2327 uint32_t track_sense_len;
2329 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2330 sense_len = SCSI_SENSE_BUFFERSIZE;
2332 SET_CMD_SENSE_LEN(sp, sense_len);
2333 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2334 track_sense_len = sense_len;
2336 if (sense_len > par_sense_len)
2337 sense_len = par_sense_len;
2339 memcpy(cp->sense_buffer, sense_data, sense_len);
2341 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2342 track_sense_len -= sense_len;
2343 SET_CMD_SENSE_LEN(sp, track_sense_len);
2345 if (track_sense_len != 0) {
2346 rsp->status_srb = sp;
2351 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2352 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2353 sp->vha->host_no, cp->device->id, cp->device->lun,
2355 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2356 cp->sense_buffer, sense_len);
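/*
 * Sense data longer than the per-entry buffer (par_sense_len bytes)
 * spills into Status Continuation entries: status_srb is set above so
 * that qla2x00_status_cont_entry() can append the remaining
 * GET_CMD_SENSE_LEN() bytes as those entries arrive.
 */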
2360 struct scsi_dif_tuple {
2361 __be16 guard; /* Checksum */
2362 __be16 app_tag; /* APPL identifier */
2363 __be32 ref_tag; /* Target LBA or indirect LBA */
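/*
 * Byte layout of the 8-byte protection tuple above, as carried on the
 * wire (big-endian): bytes 0-1 guard tag (a checksum of the data
 * block), bytes 2-3 application tag, bytes 4-7 reference tag
 * (typically the low 32 bits of the LBA for DIF types 1 and 2).
 */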
2367 * Check the guard and meta-data tags for the type of error
2368 * detected by the HBA. In case of an error, set the
2369 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
2370 * to indicate to the kernel that the HBA detected the error.
2373 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2375 struct scsi_qla_host *vha = sp->vha;
2376 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2377 uint8_t *ap = &sts24->data[12];
2378 uint8_t *ep = &sts24->data[20];
2379 uint32_t e_ref_tag, a_ref_tag;
2380 uint16_t e_app_tag, a_app_tag;
2381 uint16_t e_guard, a_guard;
2384 * The swab32() of the "data" field at the beginning of
2385 * qla2x00_status_entry() makes the guard field appear at offset 2.
2387 a_guard = get_unaligned_le16(ap + 2);
2388 a_app_tag = get_unaligned_le16(ap + 0);
2389 a_ref_tag = get_unaligned_le32(ap + 4);
2390 e_guard = get_unaligned_le16(ep + 2);
2391 e_app_tag = get_unaligned_le16(ep + 0);
2392 e_ref_tag = get_unaligned_le32(ep + 4);
2394 ql_dbg(ql_dbg_io, vha, 0x3023,
2395 "iocb(s) %p Returned STATUS.\n", sts24);
2397 ql_dbg(ql_dbg_io, vha, 0x3024,
2398 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2399 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2400 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2401 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2402 a_app_tag, e_app_tag, a_guard, e_guard);
2406 * For type 3: the ref and app tags are all 'f's
2407 * For types 0, 1, 2: the app tag is all 'f's
2409 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2410 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2411 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2412 uint32_t blocks_done, resid;
2413 sector_t lba_s = scsi_get_lba(cmd);
2415 /* The u32 truncation of the LBA covers the 2TB boundary case automatically. */
2416 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2418 resid = scsi_bufflen(cmd) - (blocks_done *
2419 cmd->device->sector_size);
2421 scsi_set_resid(cmd, resid);
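/*
 * Worked example (values assumed): with 512-byte sectors,
 * lba_s = 0x100, e_ref_tag = 0x12f and scsi_bufflen = 0x8000
 * (64 blocks), blocks_done = 0x12f - 0x100 + 1 = 48, so
 * resid = 0x8000 - 48 * 512 = 0x2000 bytes.
 */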
2422 cmd->result = DID_OK << 16;
2424 /* Update protection tag */
2425 if (scsi_prot_sg_count(cmd)) {
2426 uint32_t i, j = 0, k = 0, num_ent;
2427 struct scatterlist *sg;
2428 struct t10_pi_tuple *spt;
2430 /* Patch the corresponding protection tags */
2431 scsi_for_each_prot_sg(cmd, sg,
2432 scsi_prot_sg_count(cmd), i) {
2433 num_ent = sg_dma_len(sg) / 8;
2434 if (k + num_ent < blocks_done) {
2438 j = blocks_done - k - 1;
2443 if (k != blocks_done) {
2444 ql_log(ql_log_warn, vha, 0x302f,
2445 "unexpected tag values (tag:lba=%x:%llx)\n",
2446 e_ref_tag, (unsigned long long)lba_s);
2450 spt = page_address(sg_page(sg)) + sg->offset;
2453 spt->app_tag = T10_PI_APP_ESCAPE;
2454 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2455 spt->ref_tag = T10_PI_REF_ESCAPE;
2462 if (e_guard != a_guard) {
2463 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2465 set_driver_byte(cmd, DRIVER_SENSE);
2466 set_host_byte(cmd, DID_ABORT);
2467 cmd->result |= SAM_STAT_CHECK_CONDITION;
2472 if (e_ref_tag != a_ref_tag) {
2473 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2475 set_driver_byte(cmd, DRIVER_SENSE);
2476 set_host_byte(cmd, DID_ABORT);
2477 cmd->result |= SAM_STAT_CHECK_CONDITION;
2481 /* check appl tag */
2482 if (e_app_tag != a_app_tag) {
2483 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2485 set_driver_byte(cmd, DRIVER_SENSE);
2486 set_host_byte(cmd, DID_ABORT);
2487 cmd->result |= SAM_STAT_CHECK_CONDITION;
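/*
 * The checks above run in priority order -- guard, then reference
 * tag, then application tag -- so the midlayer sees exactly one
 * ILLEGAL_REQUEST sense cause per DIF failure.
 */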
2495 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2496 struct req_que *req, uint32_t index)
2498 struct qla_hw_data *ha = vha->hw;
2500 uint16_t comp_status;
2501 uint16_t scsi_status;
2503 uint32_t rval = EXT_STATUS_OK;
2504 struct bsg_job *bsg_job = NULL;
2505 struct fc_bsg_request *bsg_request;
2506 struct fc_bsg_reply *bsg_reply;
2507 sts_entry_t *sts = pkt;
2508 struct sts_entry_24xx *sts24 = pkt;
2510 /* Validate handle. */
2511 if (index >= req->num_outstanding_cmds) {
2512 ql_log(ql_log_warn, vha, 0x70af,
2513 "Invalid SCSI completion handle 0x%x.\n", index);
2514 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2518 sp = req->outstanding_cmds[index];
2520 ql_log(ql_log_warn, vha, 0x70b0,
2521 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2524 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2528 /* Free outstanding command slot. */
2529 req->outstanding_cmds[index] = NULL;
2530 bsg_job = sp->u.bsg_job;
2531 bsg_request = bsg_job->request;
2532 bsg_reply = bsg_job->reply;
2534 if (IS_FWI2_CAPABLE(ha)) {
2535 comp_status = le16_to_cpu(sts24->comp_status);
2536 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2538 comp_status = le16_to_cpu(sts->comp_status);
2539 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2542 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2543 switch (comp_status) {
2545 if (scsi_status == 0) {
2546 bsg_reply->reply_payload_rcv_len =
2547 bsg_job->reply_payload.payload_len;
2548 vha->qla_stats.input_bytes +=
2549 bsg_reply->reply_payload_rcv_len;
2550 vha->qla_stats.input_requests++;
2551 rval = EXT_STATUS_OK;
2555 case CS_DATA_OVERRUN:
2556 ql_dbg(ql_dbg_user, vha, 0x70b1,
2557 "Command completed with data overrun thread_id=%d\n",
2559 rval = EXT_STATUS_DATA_OVERRUN;
2562 case CS_DATA_UNDERRUN:
2563 ql_dbg(ql_dbg_user, vha, 0x70b2,
2564 "Command completed with data underrun thread_id=%d\n",
2566 rval = EXT_STATUS_DATA_UNDERRUN;
2568 case CS_BIDIR_RD_OVERRUN:
2569 ql_dbg(ql_dbg_user, vha, 0x70b3,
2570 "Command completed with read data overrun thread_id=%d\n",
2572 rval = EXT_STATUS_DATA_OVERRUN;
2575 case CS_BIDIR_RD_WR_OVERRUN:
2576 ql_dbg(ql_dbg_user, vha, 0x70b4,
2577 "Command completed with read and write data overrun "
2578 "thread_id=%d\n", thread_id);
2579 rval = EXT_STATUS_DATA_OVERRUN;
2582 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2583 ql_dbg(ql_dbg_user, vha, 0x70b5,
2584 "Command completed with read data over and write data "
2585 "underrun thread_id=%d\n", thread_id);
2586 rval = EXT_STATUS_DATA_OVERRUN;
2589 case CS_BIDIR_RD_UNDERRUN:
2590 ql_dbg(ql_dbg_user, vha, 0x70b6,
2591 "Command completed with read data underrun "
2592 "thread_id=%d\n", thread_id);
2593 rval = EXT_STATUS_DATA_UNDERRUN;
2596 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2597 ql_dbg(ql_dbg_user, vha, 0x70b7,
2598 "Command completed with read data under and write data "
2599 "overrun thread_id=%d\n", thread_id);
2600 rval = EXT_STATUS_DATA_UNDERRUN;
2603 case CS_BIDIR_RD_WR_UNDERRUN:
2604 ql_dbg(ql_dbg_user, vha, 0x70b8,
2605 "Command completed with read and write data underrun "
2606 "thread_id=%d\n", thread_id);
2607 rval = EXT_STATUS_DATA_UNDERRUN;
2611 ql_dbg(ql_dbg_user, vha, 0x70b9,
2612 "Command completed with data DMA error thread_id=%d\n",
2614 rval = EXT_STATUS_DMA_ERR;
2618 ql_dbg(ql_dbg_user, vha, 0x70ba,
2619 "Command completed with timeout thread_id=%d\n",
2621 rval = EXT_STATUS_TIMEOUT;
2624 ql_dbg(ql_dbg_user, vha, 0x70bb,
2625 "Command completed with completion status=0x%x "
2626 "thread_id=%d\n", comp_status, thread_id);
2627 rval = EXT_STATUS_ERR;
2630 bsg_reply->reply_payload_rcv_len = 0;
2633 /* Return the vendor specific reply to API */
2634 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2635 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2636 /* Always return DID_OK; the bsg layer sends the vendor-specific
2637 * response in this case only. */
2638 sp->done(sp, DID_OK << 16);
2643 * qla2x00_status_entry() - Process a Status IOCB entry.
2644 * @vha: SCSI driver HA context
2645 * @rsp: response queue
2646 * @pkt: Entry pointer
2649 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2653 struct scsi_cmnd *cp;
2654 sts_entry_t *sts = pkt;
2655 struct sts_entry_24xx *sts24 = pkt;
2656 uint16_t comp_status;
2657 uint16_t scsi_status;
2659 uint8_t lscsi_status;
2661 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2663 uint8_t *rsp_info, *sense_data;
2664 struct qla_hw_data *ha = vha->hw;
2667 struct req_que *req;
2670 uint16_t state_flags = 0;
2671 uint16_t retry_delay = 0;
2673 if (IS_FWI2_CAPABLE(ha)) {
2674 comp_status = le16_to_cpu(sts24->comp_status);
2675 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2676 state_flags = le16_to_cpu(sts24->state_flags);
2678 comp_status = le16_to_cpu(sts->comp_status);
2679 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2681 handle = (uint32_t) LSW(sts->handle);
2682 que = MSW(sts->handle);
2683 req = ha->req_q_map[que];
2685 /* Check for invalid queue pointer */
2687 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2688 ql_dbg(ql_dbg_io, vha, 0x3059,
2689 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2690 "que=%u.\n", sts->handle, req, que);
2694 /* Validate handle. */
2695 if (handle < req->num_outstanding_cmds) {
2696 sp = req->outstanding_cmds[handle];
2698 ql_dbg(ql_dbg_io, vha, 0x3075,
2699 "%s(%ld): Already returned command for status handle (0x%x).\n",
2700 __func__, vha->host_no, sts->handle);
2704 ql_dbg(ql_dbg_io, vha, 0x3017,
2705 "Invalid status handle, out of range (0x%x).\n",
2708 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2709 if (IS_P3P_TYPE(ha))
2710 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2712 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2713 qla2xxx_wake_dpc(vha);
2718 if (sp->cmd_type != TYPE_SRB) {
2719 req->outstanding_cmds[handle] = NULL;
2720 ql_dbg(ql_dbg_io, vha, 0x3015,
2721 "Unknown sp->cmd_type %x (%p).\n",
2726 /* NVME completion. */
2727 if (sp->type == SRB_NVME_CMD) {
2728 req->outstanding_cmds[handle] = NULL;
2729 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2733 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2734 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2738 /* Task Management completion. */
2739 if (sp->type == SRB_TM_CMD) {
2740 qla24xx_tm_iocb_entry(vha, req, pkt);
2744 /* Fast path completion. */
2745 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2746 qla2x00_process_completed_request(vha, req, handle);
2751 req->outstanding_cmds[handle] = NULL;
2752 cp = GET_CMD_SP(sp);
2754 ql_dbg(ql_dbg_io, vha, 0x3018,
2755 "Command already returned (0x%x/%p).\n",
2761 lscsi_status = scsi_status & STATUS_MASK;
2763 fcport = sp->fcport;
2766 sense_len = par_sense_len = rsp_info_len = resid_len =
2768 if (IS_FWI2_CAPABLE(ha)) {
2769 u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
2771 if (scsi_status & SS_SENSE_LEN_VALID)
2772 sense_len = le32_to_cpu(sts24->sense_len);
2773 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2774 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2775 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2776 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2777 if (comp_status == CS_DATA_UNDERRUN)
2778 fw_resid_len = le32_to_cpu(sts24->residual_len);
2779 rsp_info = sts24->data;
2780 sense_data = sts24->data;
2781 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2782 ox_id = le16_to_cpu(sts24->ox_id);
2783 par_sense_len = sizeof(sts24->data);
2784 /* Valid values of the retry delay timer are 0x1-0xffef */
2785 if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
2786 retry_delay = sts24_retry_delay & 0x3fff;
2787 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2788 "%s: scope=%#x retry_delay=%#x\n", __func__,
2789 sts24_retry_delay >> 14, retry_delay);
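/*
 * The raw register packs a 2-bit scope in bits 15:14 and the delay
 * itself in bits 13:0, hence the ">> 14" and "& 0x3fff" above; e.g.
 * a raw value of 0x4064 decodes to scope 1, delay 0x64.
 */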
2792 if (scsi_status & SS_SENSE_LEN_VALID)
2793 sense_len = le16_to_cpu(sts->req_sense_length);
2794 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2795 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2796 resid_len = le32_to_cpu(sts->residual_length);
2797 rsp_info = sts->rsp_info;
2798 sense_data = sts->req_sense_data;
2799 par_sense_len = sizeof(sts->req_sense_data);
2802 /* Check for any FCP transport errors. */
2803 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2804 /* Sense data lies beyond any FCP RESPONSE data. */
2805 if (IS_FWI2_CAPABLE(ha)) {
2806 sense_data += rsp_info_len;
2807 par_sense_len -= rsp_info_len;
2809 if (rsp_info_len > 3 && rsp_info[3]) {
2810 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2811 "FCP I/O protocol failure (0x%x/0x%x).\n",
2812 rsp_info_len, rsp_info[3]);
2814 res = DID_BUS_BUSY << 16;
2819 /* Check for overrun. */
2820 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2821 scsi_status & SS_RESIDUAL_OVER)
2822 comp_status = CS_DATA_OVERRUN;
2825 * Check the retry_delay_timer value if we receive a busy or queue full status.
2828 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2829 lscsi_status == SAM_STAT_BUSY)
2830 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2833 * Based on Host and scsi status generate status code for Linux
2835 switch (comp_status) {
2838 if (scsi_status == 0) {
2842 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2844 scsi_set_resid(cp, resid);
2846 if (!lscsi_status &&
2847 ((unsigned)(scsi_bufflen(cp) - resid) <
2849 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2850 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2851 resid, scsi_bufflen(cp));
2853 res = DID_ERROR << 16;
2857 res = DID_OK << 16 | lscsi_status;
2859 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2860 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2861 "QUEUE FULL detected.\n");
2865 if (lscsi_status != SS_CHECK_CONDITION)
2868 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2869 if (!(scsi_status & SS_SENSE_LEN_VALID))
2872 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2876 case CS_DATA_UNDERRUN:
2877 /* Use F/W calculated residual length. */
2878 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2879 scsi_set_resid(cp, resid);
2880 if (scsi_status & SS_RESIDUAL_UNDER) {
2881 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2882 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2883 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2884 resid, scsi_bufflen(cp));
2886 res = DID_ERROR << 16 | lscsi_status;
2887 goto check_scsi_status;
2890 if (!lscsi_status &&
2891 ((unsigned)(scsi_bufflen(cp) - resid) <
2893 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2894 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2895 resid, scsi_bufflen(cp));
2897 res = DID_ERROR << 16;
2900 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2901 lscsi_status != SAM_STAT_BUSY) {
2903 * SCSI statuses of TASK SET FULL and BUSY are treated as the
2904 * task not having completed.
2907 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2908 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2909 resid, scsi_bufflen(cp));
2911 res = DID_ERROR << 16 | lscsi_status;
2912 goto check_scsi_status;
2914 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2915 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2916 scsi_status, lscsi_status);
2919 res = DID_OK << 16 | lscsi_status;
2924 * Check to see if the SCSI status is non-zero; if so, report the SCSI status.
2927 if (lscsi_status != 0) {
2928 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2929 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2930 "QUEUE FULL detected.\n");
2934 if (lscsi_status != SS_CHECK_CONDITION)
2937 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2938 if (!(scsi_status & SS_SENSE_LEN_VALID))
2941 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2942 sense_len, rsp, res);
2946 case CS_PORT_LOGGED_OUT:
2947 case CS_PORT_CONFIG_CHG:
2950 case CS_PORT_UNAVAILABLE:
2955 * We are going to have the fc class block the rport
2956 * while we try to recover so instruct the mid layer
2957 * to requeue until the class decides how to handle this.
2959 res = DID_TRANSPORT_DISRUPTED << 16;
2961 if (comp_status == CS_TIMEOUT) {
2962 if (IS_FWI2_CAPABLE(ha))
2964 else if ((le16_to_cpu(sts->status_flags) &
2965 SF_LOGOUT_SENT) == 0)
2969 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2970 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2971 "Port to be marked lost on fcport=%02x%02x%02x, current "
2972 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
2973 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2974 port_state_str[FCS_ONLINE],
2977 qlt_schedule_sess_for_deletion(fcport);
2983 res = DID_RESET << 16;
2987 logit = qla2x00_handle_dif_error(sp, sts24);
2992 res = DID_ERROR << 16;
2994 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2997 if (state_flags & BIT_4)
2998 scmd_printk(KERN_WARNING, cp,
2999 "Unsupported device '%s' found.\n",
3000 cp->device->vendor);
3004 ql_log(ql_log_info, fcport->vha, 0x3022,
3005 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3006 comp_status, scsi_status, res, vha->host_no,
3007 cp->device->id, cp->device->lun, fcport->d_id.b24,
3008 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3009 resid_len, fw_resid_len, sp, cp);
3010 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3011 pkt, sizeof(*sts24));
3012 res = DID_ERROR << 16;
3015 res = DID_ERROR << 16;
3021 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
3022 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
3023 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
3024 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3025 comp_status, scsi_status, res, vha->host_no,
3026 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3027 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3028 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3029 resid_len, fw_resid_len, sp, cp);
3031 if (rsp->status_srb == NULL)
3036 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3037 * @rsp: response queue
3038 * @pkt: Entry pointer
3040 * Extended sense data.
3043 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3045 uint8_t sense_sz = 0;
3046 struct qla_hw_data *ha = rsp->hw;
3047 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3048 srb_t *sp = rsp->status_srb;
3049 struct scsi_cmnd *cp;
3053 if (!sp || !GET_CMD_SENSE_LEN(sp))
3056 sense_len = GET_CMD_SENSE_LEN(sp);
3057 sense_ptr = GET_CMD_SENSE_PTR(sp);
3059 cp = GET_CMD_SP(sp);
3061 ql_log(ql_log_warn, vha, 0x3025,
3062 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3064 rsp->status_srb = NULL;
3068 if (sense_len > sizeof(pkt->data))
3069 sense_sz = sizeof(pkt->data);
3071 sense_sz = sense_len;
3073 /* Move sense data. */
3074 if (IS_FWI2_CAPABLE(ha))
3075 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3076 memcpy(sense_ptr, pkt->data, sense_sz);
3077 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3078 sense_ptr, sense_sz);
3080 sense_len -= sense_sz;
3081 sense_ptr += sense_sz;
3083 SET_CMD_SENSE_PTR(sp, sense_ptr);
3084 SET_CMD_SENSE_LEN(sp, sense_len);
3086 /* Place command on done queue. */
3087 if (sense_len == 0) {
3088 rsp->status_srb = NULL;
3089 sp->done(sp, cp->result);
3094 * qla2x00_error_entry() - Process an error entry.
3095 * @vha: SCSI driver HA context
3096 * @rsp: response queue
3097 * @pkt: Entry pointer
3098 * Return: 1 = allow further error analysis; 0 = no additional error analysis.
3101 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3104 struct qla_hw_data *ha = vha->hw;
3105 const char func[] = "ERROR-IOCB";
3106 uint16_t que = MSW(pkt->handle);
3107 struct req_que *req = NULL;
3108 int res = DID_ERROR << 16;
3110 ql_dbg(ql_dbg_async, vha, 0x502a,
3111 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3112 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3114 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3117 req = ha->req_q_map[que];
3119 if (pkt->entry_status & RF_BUSY)
3120 res = DID_BUS_BUSY << 16;
3122 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3125 switch (pkt->entry_type) {
3126 case NOTIFY_ACK_TYPE:
3128 case STATUS_CONT_TYPE:
3129 case LOGINOUT_PORT_IOCB_TYPE:
3132 case ABORT_IOCB_TYPE:
3135 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3142 case ABTS_RESP_24XX:
3148 ql_log(ql_log_warn, vha, 0x5030,
3149 "Error entry - invalid handle/queue (%04x).\n", que);
3154 * qla24xx_mbx_completion() - Process mailbox command completions.
3155 * @vha: SCSI driver HA context
3156 * @mb0: Mailbox0 register
3159 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3163 __le16 __iomem *wptr;
3164 struct qla_hw_data *ha = vha->hw;
3165 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3167 /* Read all mbox registers? */
3168 WARN_ON_ONCE(ha->mbx_count > 32);
3169 mboxes = (1ULL << ha->mbx_count) - 1;
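/*
 * (1ULL << n) - 1 sets the low n bits, e.g. mbx_count == 8 yields a
 * mask of 0xff so mailboxes 0-7 are read back; the WARN_ON_ONCE()
 * above guards against counts wider than the mask/array allow.
 */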
3171 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3173 mboxes = ha->mcp->in_mb;
3175 /* Load return mailbox registers. */
3176 ha->flags.mbox_int = 1;
3177 ha->mailbox_out[0] = mb0;
3179 wptr = &reg->mailbox1;
3181 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3183 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3191 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3192 struct abort_entry_24xx *pkt)
3194 const char func[] = "ABT_IOCB";
3196 struct srb_iocb *abt;
3198 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3202 abt = &sp->u.iocb_cmd;
3203 abt->u.abt.comp_status = pkt->nport_handle;
3207 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3208 struct pt_ls4_request *pkt, struct req_que *req)
3211 const char func[] = "LS4_IOCB";
3212 uint16_t comp_status;
3214 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3218 comp_status = le16_to_cpu(pkt->status);
3219 sp->done(sp, comp_status);
3223 * qla24xx_process_response_queue() - Process response queue entries.
3224 * @vha: SCSI driver HA context
3225 * @rsp: response queue
3227 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3228 struct rsp_que *rsp)
3230 struct sts_entry_24xx *pkt;
3231 struct qla_hw_data *ha = vha->hw;
3233 if (!ha->flags.fw_started)
3236 if (rsp->qpair->cpuid != smp_processor_id())
3237 qla_cpu_update(rsp->qpair, smp_processor_id());
3239 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3240 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3243 if (rsp->ring_index == rsp->length) {
3244 rsp->ring_index = 0;
3245 rsp->ring_ptr = rsp->ring;
3250 if (pkt->entry_status != 0) {
3251 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3254 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3260 switch (pkt->entry_type) {
3262 qla2x00_status_entry(vha, rsp, pkt);
3264 case STATUS_CONT_TYPE:
3265 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3267 case VP_RPT_ID_IOCB_TYPE:
3268 qla24xx_report_id_acquisition(vha,
3269 (struct vp_rpt_id_entry_24xx *)pkt);
3271 case LOGINOUT_PORT_IOCB_TYPE:
3272 qla24xx_logio_entry(vha, rsp->req,
3273 (struct logio_entry_24xx *)pkt);
3276 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3279 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3281 case ABTS_RECV_24XX:
3282 if (qla_ini_mode_enabled(vha)) {
3283 qla24xx_purex_iocb(vha, pkt,
3284 qla24xx_process_abts);
3287 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3289 /* ensure that the ATIO queue is empty */
3290 qlt_handle_abts_recv(vha, rsp,
3294 qlt_24xx_process_atio_queue(vha, 1);
3297 case ABTS_RESP_24XX:
3300 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3302 case PT_LS4_REQUEST:
3303 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3306 case NOTIFY_ACK_TYPE:
3307 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3308 qlt_response_pkt_all_vps(vha, rsp,
3311 qla24xxx_nack_iocb_entry(vha, rsp->req,
3312 (struct nack_to_isp *)pkt);
3315 /* Do nothing in this case; this check only prevents the entry
3316 * from falling into the default case. */
3319 case ABORT_IOCB_TYPE:
3320 qla24xx_abort_iocb_entry(vha, rsp->req,
3321 (struct abort_entry_24xx *)pkt);
3324 qla24xx_mbx_iocb_entry(vha, rsp->req,
3325 (struct mbx_24xx_entry *)pkt);
3327 case VP_CTRL_IOCB_TYPE:
3328 qla_ctrlvp_completed(vha, rsp->req,
3329 (struct vp_ctrl_entry_24xx *)pkt);
3331 case PUREX_IOCB_TYPE:
3333 struct purex_entry_24xx *purex = (void *)pkt;
3335 if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) {
3336 ql_dbg(ql_dbg_init, vha, 0x5091,
3337 "Discarding ELS Request opcode %#x...\n",
3338 purex->els_frame_payload[3]);
3341 qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp);
3345 /* Type Not Supported. */
3346 ql_dbg(ql_dbg_async, vha, 0x5042,
3347 "Received unknown response pkt type %x "
3348 "entry status=%x.\n",
3349 pkt->entry_type, pkt->entry_status);
3352 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3356 /* Adjust ring index */
3357 if (IS_P3P_TYPE(ha)) {
3358 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3360 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3362 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3367 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3371 struct qla_hw_data *ha = vha->hw;
3372 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3374 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3375 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3379 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3380 rd_reg_dword(&reg->iobase_addr);
3381 wrt_reg_dword(&reg->iobase_window, 0x0001);
3382 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3383 rval == QLA_SUCCESS; cnt--) {
3385 wrt_reg_dword(&reg->iobase_window, 0x0001);
3388 rval = QLA_FUNCTION_TIMEOUT;
3390 if (rval == QLA_SUCCESS)
3394 wrt_reg_dword(&reg->iobase_window, 0x0003);
3395 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3396 rval == QLA_SUCCESS; cnt--) {
3398 wrt_reg_dword(&reg->iobase_window, 0x0003);
3401 rval = QLA_FUNCTION_TIMEOUT;
3403 if (rval != QLA_SUCCESS)
3407 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3408 ql_log(ql_log_info, vha, 0x504c,
3409 "Additional code -- 0x55AA.\n");
3412 wrt_reg_dword(&reg->iobase_window, 0x0000);
3413 rd_reg_dword(&reg->iobase_window);
3417 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3418 * @irq: interrupt number
3419 * @dev_id: SCSI driver HA context
3421 * Called by the system whenever the host adapter generates an interrupt.
3423 * Returns handled flag.
3426 qla24xx_intr_handler(int irq, void *dev_id)
3428 scsi_qla_host_t *vha;
3429 struct qla_hw_data *ha;
3430 struct device_reg_24xx __iomem *reg;
3436 struct rsp_que *rsp;
3437 unsigned long flags;
3438 bool process_atio = false;
3440 rsp = (struct rsp_que *) dev_id;
3442 ql_log(ql_log_info, NULL, 0x5059,
3443 "%s: NULL response queue pointer.\n", __func__);
3448 reg = &ha->iobase->isp24;
3451 if (unlikely(pci_channel_offline(ha->pdev)))
3454 spin_lock_irqsave(&ha->hardware_lock, flags);
3455 vha = pci_get_drvdata(ha->pdev);
3456 for (iter = 50; iter--; ) {
3457 stat = rd_reg_dword(&reg->host_status);
3458 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3460 if (stat & HSRX_RISC_PAUSED) {
3461 if (unlikely(pci_channel_offline(ha->pdev)))
3464 hccr = rd_reg_dword(&reg->hccr);
3466 ql_log(ql_log_warn, vha, 0x504b,
3467 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3470 qla2xxx_check_risc_status(vha);
3472 ha->isp_ops->fw_dump(vha);
3473 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3475 } else if ((stat & HSRX_RISC_INT) == 0)
3478 switch (stat & 0xff) {
3479 case INTR_ROM_MB_SUCCESS:
3480 case INTR_ROM_MB_FAILED:
3481 case INTR_MB_SUCCESS:
3482 case INTR_MB_FAILED:
3483 qla24xx_mbx_completion(vha, MSW(stat));
3484 status |= MBX_INTERRUPT;
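/*
 * host_status packs the interrupt type in its low byte and, for
 * mailbox interrupts, mailbox0 in the upper 16 bits -- hence the
 * switch on (stat & 0xff) and the MSW(stat) above.
 */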
3487 case INTR_ASYNC_EVENT:
3489 mb[1] = rd_reg_word(&reg->mailbox1);
3490 mb[2] = rd_reg_word(&reg->mailbox2);
3491 mb[3] = rd_reg_word(&reg->mailbox3);
3492 qla2x00_async_event(vha, rsp, mb);
3494 case INTR_RSP_QUE_UPDATE:
3495 case INTR_RSP_QUE_UPDATE_83XX:
3496 qla24xx_process_response_queue(vha, rsp);
3498 case INTR_ATIO_QUE_UPDATE_27XX:
3499 case INTR_ATIO_QUE_UPDATE:
3500 process_atio = true;
3502 case INTR_ATIO_RSP_QUE_UPDATE:
3503 process_atio = true;
3504 qla24xx_process_response_queue(vha, rsp);
3507 ql_dbg(ql_dbg_async, vha, 0x504f,
3508 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3511 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3512 rd_reg_dword_relaxed(&reg->hccr);
3513 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3516 qla2x00_handle_mbx_completion(ha, status);
3517 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3520 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3521 qlt_24xx_process_atio_queue(vha, 0);
3522 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3529 qla24xx_msix_rsp_q(int irq, void *dev_id)
3531 struct qla_hw_data *ha;
3532 struct rsp_que *rsp;
3533 struct device_reg_24xx __iomem *reg;
3534 struct scsi_qla_host *vha;
3535 unsigned long flags;
3537 rsp = (struct rsp_que *) dev_id;
3539 ql_log(ql_log_info, NULL, 0x505a,
3540 "%s: NULL response queue pointer.\n", __func__);
3544 reg = &ha->iobase->isp24;
3546 spin_lock_irqsave(&ha->hardware_lock, flags);
3548 vha = pci_get_drvdata(ha->pdev);
3549 qla24xx_process_response_queue(vha, rsp);
3550 if (!ha->flags.disable_msix_handshake) {
3551 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3552 rd_reg_dword_relaxed(&reg->hccr);
3554 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3560 qla24xx_msix_default(int irq, void *dev_id)
3562 scsi_qla_host_t *vha;
3563 struct qla_hw_data *ha;
3564 struct rsp_que *rsp;
3565 struct device_reg_24xx __iomem *reg;
3570 unsigned long flags;
3571 bool process_atio = false;
3573 rsp = (struct rsp_que *) dev_id;
3575 ql_log(ql_log_info, NULL, 0x505c,
3576 "%s: NULL response queue pointer.\n", __func__);
3580 reg = &ha->iobase->isp24;
3583 spin_lock_irqsave(&ha->hardware_lock, flags);
3584 vha = pci_get_drvdata(ha->pdev);
3586 stat = rd_reg_dword(&reg->host_status);
3587 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3589 if (stat & HSRX_RISC_PAUSED) {
3590 if (unlikely(pci_channel_offline(ha->pdev)))
3593 hccr = rd_reg_dword(&reg->hccr);
3595 ql_log(ql_log_info, vha, 0x5050,
3596 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3599 qla2xxx_check_risc_status(vha);
3601 ha->isp_ops->fw_dump(vha);
3602 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3604 } else if ((stat & HSRX_RISC_INT) == 0)
3607 switch (stat & 0xff) {
3608 case INTR_ROM_MB_SUCCESS:
3609 case INTR_ROM_MB_FAILED:
3610 case INTR_MB_SUCCESS:
3611 case INTR_MB_FAILED:
3612 qla24xx_mbx_completion(vha, MSW(stat));
3613 status |= MBX_INTERRUPT;
3616 case INTR_ASYNC_EVENT:
3618 mb[1] = rd_reg_word(&reg->mailbox1);
3619 mb[2] = rd_reg_word(&reg->mailbox2);
3620 mb[3] = rd_reg_word(&reg->mailbox3);
3621 qla2x00_async_event(vha, rsp, mb);
3623 case INTR_RSP_QUE_UPDATE:
3624 case INTR_RSP_QUE_UPDATE_83XX:
3625 qla24xx_process_response_queue(vha, rsp);
3627 case INTR_ATIO_QUE_UPDATE_27XX:
3628 case INTR_ATIO_QUE_UPDATE:
3629 process_atio = true;
3631 case INTR_ATIO_RSP_QUE_UPDATE:
3632 process_atio = true;
3633 qla24xx_process_response_queue(vha, rsp);
3636 ql_dbg(ql_dbg_async, vha, 0x5051,
3637 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3640 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3642 qla2x00_handle_mbx_completion(ha, status);
3643 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3646 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3647 qlt_24xx_process_atio_queue(vha, 0);
3648 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3655 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3657 struct qla_hw_data *ha;
3658 struct qla_qpair *qpair;
3662 ql_log(ql_log_info, NULL, 0x505b,
3663 "%s: NULL response queue pointer.\n", __func__);
3668 queue_work(ha->wq, &qpair->q_work);
3674 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
3676 struct qla_hw_data *ha;
3677 struct qla_qpair *qpair;
3678 struct device_reg_24xx __iomem *reg;
3679 unsigned long flags;
3683 ql_log(ql_log_info, NULL, 0x505b,
3684 "%s: NULL response queue pointer.\n", __func__);
3689 reg = &ha->iobase->isp24;
3690 spin_lock_irqsave(&ha->hardware_lock, flags);
3691 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3692 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3694 queue_work(ha->wq, &qpair->q_work);
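/*
 * The "_hs" (handshake) variant above differs from qla2xxx_msix_rsp_q
 * only in its acknowledgement: it clears the RISC interrupt via
 * HCCRX_CLR_RISC_INT under hardware_lock before deferring the actual
 * processing to the qpair's workqueue.
 */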
3699 /* Interrupt handling helpers. */
3701 struct qla_init_msix_entry {
3703 irq_handler_t handler;
3706 static const struct qla_init_msix_entry msix_entries[] = {
3707 { "default", qla24xx_msix_default },
3708 { "rsp_q", qla24xx_msix_rsp_q },
3709 { "atio_q", qla83xx_msix_atio_q },
3710 { "qpair_multiq", qla2xxx_msix_rsp_q },
3711 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
3714 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3715 { "qla2xxx (default)", qla82xx_msix_default },
3716 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3720 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3723 struct qla_msix_entry *qentry;
3724 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3725 int min_vecs = QLA_BASE_VECTORS;
3726 struct irq_affinity desc = {
3727 .pre_vectors = QLA_BASE_VECTORS,
3730 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3731 IS_ATIO_MSIX_CAPABLE(ha)) {
3736 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
3737 /* user wants to control IRQ setting for target mode */
3738 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3739 ha->msix_count, PCI_IRQ_MSIX);
3741 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3742 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3746 ql_log(ql_log_fatal, vha, 0x00c7,
3747 "MSI-X: Failed to enable support, "
3748 "giving up -- %d/%d.\n",
3749 ha->msix_count, ret);
3751 } else if (ret < ha->msix_count) {
3752 ql_log(ql_log_info, vha, 0x00c6,
3753 "MSI-X: Using %d vectors\n", ret);
3754 ha->msix_count = ret;
3755 /* Recalculate queue values */
3756 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3757 ha->max_req_queues = ha->msix_count - 1;
3759 /* ATIOQ needs 1 vector. That's 1 less QPair */
3760 if (QLA_TGT_MODE_ENABLED())
3761 ha->max_req_queues--;
3763 ha->max_rsp_queues = ha->max_req_queues;
3765 ha->max_qpairs = ha->max_req_queues - 1;
3766 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3767 "Adjusted Max no of queue pairs: %d.\n", ha->max_qpairs);
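/*
 * Example (counts assumed): msix_count == 16 leaves
 * max_req_queues == 15; with target mode enabled the ATIO queue
 * takes one more vector, so max_req_queues == 14 and
 * max_qpairs == 13.
 */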
3770 vha->irq_offset = desc.pre_vectors;
3771 ha->msix_entries = kcalloc(ha->msix_count,
3772 sizeof(struct qla_msix_entry),
3774 if (!ha->msix_entries) {
3775 ql_log(ql_log_fatal, vha, 0x00c8,
3776 "Failed to allocate memory for ha->msix_entries.\n");
3780 ha->flags.msix_enabled = 1;
3782 for (i = 0; i < ha->msix_count; i++) {
3783 qentry = &ha->msix_entries[i];
3784 qentry->vector = pci_irq_vector(ha->pdev, i);
3786 qentry->have_irq = 0;
3788 qentry->handle = NULL;
3791 /* Enable MSI-X vectors for the base queue */
3792 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3793 qentry = &ha->msix_entries[i];
3794 qentry->handle = rsp;
3796 scnprintf(qentry->name, sizeof(qentry->name),
3797 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3798 if (IS_P3P_TYPE(ha))
3799 ret = request_irq(qentry->vector,
3800 qla82xx_msix_entries[i].handler,
3801 0, qla82xx_msix_entries[i].name, rsp);
3803 ret = request_irq(qentry->vector,
3804 msix_entries[i].handler,
3805 0, qentry->name, rsp);
3807 goto msix_register_fail;
3808 qentry->have_irq = 1;
3813 * If target mode is enabled, also request the vector for the ATIO queue.
3816 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3817 IS_ATIO_MSIX_CAPABLE(ha)) {
3818 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3820 qentry->handle = rsp;
3821 scnprintf(qentry->name, sizeof(qentry->name),
3822 "qla2xxx%lu_%s", vha->host_no,
3823 msix_entries[QLA_ATIO_VECTOR].name);
3825 ret = request_irq(qentry->vector,
3826 msix_entries[QLA_ATIO_VECTOR].handler,
3827 0, qentry->name, rsp);
3828 qentry->have_irq = 1;
3833 ql_log(ql_log_fatal, vha, 0x00cb,
3834 "MSI-X: unable to register handler -- %x/%d.\n",
3835 qentry->vector, ret);
3836 qla2x00_free_irqs(vha);
3841 /* Enable MSI-X vector for response queue update for queue 0 */
3842 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3843 if (ha->msixbase && ha->mqiobase &&
3844 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3849 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3852 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3853 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3854 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3855 ql_dbg(ql_dbg_init, vha, 0x0055,
3856 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3857 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3863 pci_free_irq_vectors(ha->pdev);
3868 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3870 int ret = QLA_FUNCTION_FAILED;
3871 device_reg_t *reg = ha->iobase;
3872 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3874 /* If possible, enable MSI-X. */
3875 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3876 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3877 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
3880 if (ql2xenablemsix == 2)
3883 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3884 (ha->pdev->subsystem_device == 0x7040 ||
3885 ha->pdev->subsystem_device == 0x7041 ||
3886 ha->pdev->subsystem_device == 0x1705)) {
3887 ql_log(ql_log_warn, vha, 0x0034,
3888 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3889 ha->pdev->subsystem_vendor,
3890 ha->pdev->subsystem_device);
3894 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3895 ql_log(ql_log_warn, vha, 0x0035,
3896 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3897 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3901 ret = qla24xx_enable_msix(ha, rsp);
3903 ql_dbg(ql_dbg_init, vha, 0x0036,
3904 "MSI-X: Enabled (0x%X, 0x%X).\n",
3905 ha->chip_revision, ha->fw_attributes);
3906 goto clear_risc_ints;
3911 ql_log(ql_log_info, vha, 0x0037,
3912 "Falling back to MSI mode -- ret=%d.\n", ret);
3914 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3915 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3916 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3919 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3921 ql_dbg(ql_dbg_init, vha, 0x0038,
3923 ha->flags.msi_enabled = 1;
3925 ql_log(ql_log_warn, vha, 0x0039,
3926 "Falling back to INTa mode -- ret=%d.\n", ret);
3929 /* Skip INTx on ISP82xx. */
3930 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3931 return QLA_FUNCTION_FAILED;
3933 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3934 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3935 QLA2XXX_DRIVER_NAME, rsp);
3937 ql_log(ql_log_warn, vha, 0x003a,
3938 "Failed to reserve interrupt %d -- already in use.\n",
3941 } else if (!ha->flags.msi_enabled) {
3942 ql_dbg(ql_dbg_init, vha, 0x0125,
3943 "INTa mode: Enabled.\n");
3944 ha->flags.mr_intr_valid = 1;
3948 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3951 spin_lock_irq(&ha->hardware_lock);
3952 wrt_reg_word(&reg->isp.semaphore, 0);
3953 spin_unlock_irq(&ha->hardware_lock);
3960 qla2x00_free_irqs(scsi_qla_host_t *vha)
3962 struct qla_hw_data *ha = vha->hw;
3963 struct rsp_que *rsp;
3964 struct qla_msix_entry *qentry;
3968 * We need to check that ha->rsp_q_map is valid in case we are called
3969 * from a probe failure context.
3971 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3973 rsp = ha->rsp_q_map[0];
3975 if (ha->flags.msix_enabled) {
3976 for (i = 0; i < ha->msix_count; i++) {
3977 qentry = &ha->msix_entries[i];
3978 if (qentry->have_irq) {
3979 irq_set_affinity_notifier(qentry->vector, NULL);
3980 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3983 kfree(ha->msix_entries);
3984 ha->msix_entries = NULL;
3985 ha->flags.msix_enabled = 0;
3986 ql_dbg(ql_dbg_init, vha, 0x0042,
3987 "Disabled MSI-X.\n");
3989 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3993 pci_free_irq_vectors(ha->pdev);
3996 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3997 struct qla_msix_entry *msix, int vector_type)
3999 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4000 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4003 scnprintf(msix->name, sizeof(msix->name),
4004 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4005 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4007 ql_log(ql_log_fatal, vha, 0x00e6,
4008 "MSI-X: Unable to register handler -- %x/%d.\n",
4013 msix->handle = qpair;