/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
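			/*
			 * Mailbox values 0x4000-0x7fff are mailbox-command
			 * completion statuses (MBS_*); 0x8000-0xbfff are
			 * asynchronous event codes (MBA_*), hence the two
			 * range checks below.
			 */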
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}
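
/*
 * A disconnected PCI device reads back as all ones.  OR-ing 0xffff0000 onto
 * a 16-bit register value lets the 16-bit variant below reuse the 32-bit
 * all-ones check above.
 */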
bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;
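
	/*
	 * Each bit of "mboxes" selects one mailbox register the caller asked
	 * for; the bitmap is shifted right in step with the loop below so
	 * only the requested registers are read back.
	 */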
	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
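
/*
 * Translate a firmware link-speed code into a printable rate; for example,
 * a speed value of 0x13 (the firmware's 10 Gbps code) maps to the "10"
 * entry below, while the remaining codes index the table directly.
 */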
#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED	7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code, 0x8200
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 *  - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 *  - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
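
			/*
			 * Illustrative decode: mb[2] = 0x0142 with
			 * mb[6] = 0x2003 yields protocol_engine_id = 0x42,
			 * fw_err_code = 0x301 and err_level = 0x1
			 * (non-fatal).
			 */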
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 *  - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	  SFP Status 0x0 = SFP+ transceiver not expected
			 *	  SFP Status 0x1 = SFP+ transceiver not present
			 *	  SFP Status 0x2 = SFP+ transceiver invalid
			 *	  SFP Status 0x3 = SFP+ transceiver present and
			 *	  valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	  SFP info 0x0 = Unrecognized transceiver for
			 *	  Ethernet
			 *	  SFP info 0x1 = SFP+ brand validation failed
			 *	  SFP info 0x2 = SFP+ speed validation failed
			 *	  SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	  DCBX Status 0x0 = DCBX Disabled
			 *	  DCBX Status 0x1 = DCBX Enabled
			 *	  DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);
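
			/*
			 * The masks and shifts above mirror the bit map in
			 * the comment; e.g. an illustrative mb[2] = 0x8501
			 * decodes to peg_fw_state = 0x01,
			 * nw_interface_link_up = 1 and
			 * htbt_monitor_enable = 1.
			 */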
			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_state=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;
	fc_port_t	*fcport = NULL;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;
		ha->flags.n2n_ae = 0;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		ha->flags.n2n_ae = 0;
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected,"
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;
		ha->flags.n2n_ae = 1;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion_lock(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.event = FCME_RSCN;
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_fcport_event_handler(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    (uint8_t *)pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(sp, 0);
		return;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* drop through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void
qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "NVME-IOCB";
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t	state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t	ret = 0;
	struct srb_iocb *nvme;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;
	nvme = &sp->u.iocb_cmd;

	if (unlikely(nvme->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags: bit 6 (SF_NVME_ERSP) and bit 0 (SF_FCP_RSP_DMA).
	 * If bit 0 is set, bit 6 does not matter: in both cases the
	 * response was DMA'd to the host buffer.  If both are clear,
	 * that is the good-path case.  If bit 6 is set and bit 0 is
	 * clear, the response data must be copied from the status IOCB
	 * to the response buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else { /* unhandled case */
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	/*
	 * If there is a transport error, fail the request (the HBA rejected
	 * it); otherwise the NVMe transport layer handles the status.
	 */
	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "NVME-%s error - hdl=%x entry-status(%x).\n",
		    sp->name, sp->handle, sts->entry_status);
		ret = QLA_FUNCTION_FAILED;
	} else {
		switch (le16_to_cpu(sts->comp_status)) {
		case CS_COMPLETE:
			ret = 0;
			break;

		case CS_ABORTED:
		case CS_RESET:
		case CS_PORT_UNAVAILABLE:
		case CS_PORT_LOGGED_OUT:
		case CS_PORT_BUSY:
			ql_log(ql_log_warn, fcport->vha, 0x5060,
			    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
			    sp->name, sp->handle, sts->comp_status,
			    le32_to_cpu(sts->residual_len), sts->ox_id);
			fd->transferred_length = fd->payload_length;
			ret = QLA_ABORTED;
			break;

		default:
			ql_log(ql_log_warn, fcport->vha, 0x5060,
			    "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
			    sp->name, sp->handle, sts->comp_status,
			    le32_to_cpu(sts->residual_len), sts->ox_id);
			ret = QLA_FUNCTION_FAILED;
			break;
		}
	}
	sp->done(sp, ret);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);
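
	/*
	 * Any remaining sense bytes arrive in later status-continuation
	 * IOCBs; stashing the srb in rsp->status_srb below lets
	 * qla2x00_status_cont_entry() append them to cp->sense_buffer.
	 */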
	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;		/* Checksum */
	__be16 app_tag;		/* APPL identifier */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
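
	/*
	 * Tuple layout after the swab (per the note above): bytes 0-1 hold
	 * the application tag, bytes 2-3 the guard and bytes 4-7 the
	 * reference tag, for both the actual ("ap") and expected ("ep")
	 * tuples.
	 */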
	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
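
		/*
		 * Example: lba_s = 0x1000 with e_ref_tag = 0x100f means
		 * 0x10 (16) blocks completed before the escape tag was
		 * seen.
		 */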
		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}

static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(sp, DID_OK << 6);
}
2315 /**
2316 * qla2x00_status_entry() - Process a Status IOCB entry.
2317 * @vha: SCSI driver HA context
2318 * @pkt: Entry pointer
2319 */
2320 static void
2321 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2322 {
2323 srb_t *sp;
2324 fc_port_t *fcport;
2325 struct scsi_cmnd *cp;
2326 sts_entry_t *sts;
2327 struct sts_entry_24xx *sts24;
2328 uint16_t comp_status;
2329 uint16_t scsi_status;
2330 uint16_t ox_id;
2331 uint8_t lscsi_status;
2332 int32_t resid;
2333 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2334 fw_resid_len;
2335 uint8_t *rsp_info, *sense_data;
2336 struct qla_hw_data *ha = vha->hw;
2337 uint32_t handle;
2338 uint16_t que;
2339 struct req_que *req;
2340 int logit = 1;
2341 int res = 0;
2342 uint16_t state_flags = 0;
2343 uint16_t retry_delay = 0;
2344 uint8_t no_logout = 0;
2346 sts = (sts_entry_t *) pkt;
2347 sts24 = (struct sts_entry_24xx *) pkt;
2348 if (IS_FWI2_CAPABLE(ha)) {
2349 comp_status = le16_to_cpu(sts24->comp_status);
2350 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2351 state_flags = le16_to_cpu(sts24->state_flags);
2352 } else {
2353 comp_status = le16_to_cpu(sts->comp_status);
2354 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2355 }
2356 handle = (uint32_t) LSW(sts->handle);
2357 que = MSW(sts->handle);
2358 req = ha->req_q_map[que];
2360 /* Check for invalid queue pointer */
2361 if (req == NULL ||
2362 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2363 ql_dbg(ql_dbg_io, vha, 0x3059,
2364 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2365 "que=%u.\n", sts->handle, req, que);
2366 return;
2367 }
2369 /* Validate handle. */
2370 if (handle < req->num_outstanding_cmds) {
2371 sp = req->outstanding_cmds[handle];
2372 if (!sp) {
2373 ql_dbg(ql_dbg_io, vha, 0x3075,
2374 "%s(%ld): Already returned command for status handle (0x%x).\n",
2375 __func__, vha->host_no, sts->handle);
2376 return;
2377 }
2378 } else {
2379 ql_dbg(ql_dbg_io, vha, 0x3017,
2380 "Invalid status handle, out of range (0x%x).\n",
2381 sts->handle);
2383 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2384 if (IS_P3P_TYPE(ha))
2385 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2386 else
2387 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2388 qla2xxx_wake_dpc(vha);
2389 }
2390 return;
2391 }
2393 if (sp->cmd_type != TYPE_SRB) {
2394 req->outstanding_cmds[handle] = NULL;
2395 ql_dbg(ql_dbg_io, vha, 0x3015,
2396 "Unknown sp->cmd_type %x %p).\n",
2397 sp->cmd_type, GET_CMD_SP(sp));
2398 return;
2399 }
2401 /* NVME completion. */
2402 if (sp->type == SRB_NVME_CMD) {
2403 qla24xx_nvme_iocb_entry(vha, req, pkt);
2404 return;
2405 }
2407 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2408 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2409 return;
2410 }
2412 /* Task Management completion. */
2413 if (sp->type == SRB_TM_CMD) {
2414 qla24xx_tm_iocb_entry(vha, req, pkt);
2415 return;
2416 }
2418 /* Fast path completion. */
2419 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2420 qla2x00_process_completed_request(vha, req, handle);
2422 return;
2423 }
2425 req->outstanding_cmds[handle] = NULL;
2426 cp = GET_CMD_SP(sp);
2427 if (cp == NULL) {
2428 ql_dbg(ql_dbg_io, vha, 0x3018,
2429 "Command already returned (0x%x/%p).\n",
2430 sts->handle, sp);
2432 return;
2433 }
2435 lscsi_status = scsi_status & STATUS_MASK;
2437 fcport = sp->fcport;
2439 ox_id = 0;
2440 sense_len = par_sense_len = rsp_info_len = resid_len =
2441 fw_resid_len = 0;
2442 if (IS_FWI2_CAPABLE(ha)) {
2443 if (scsi_status & SS_SENSE_LEN_VALID)
2444 sense_len = le32_to_cpu(sts24->sense_len);
2445 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2446 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2447 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2448 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2449 if (comp_status == CS_DATA_UNDERRUN)
2450 fw_resid_len = le32_to_cpu(sts24->residual_len);
2451 rsp_info = sts24->data;
2452 sense_data = sts24->data;
2453 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2454 ox_id = le16_to_cpu(sts24->ox_id);
2455 par_sense_len = sizeof(sts24->data);
2456 /* Valid values of the retry delay timer are 0x1-0xffef */
2457 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
2458 retry_delay = sts24->retry_delay;
2459 } else {
2460 if (scsi_status & SS_SENSE_LEN_VALID)
2461 sense_len = le16_to_cpu(sts->req_sense_length);
2462 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2463 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2464 resid_len = le32_to_cpu(sts->residual_length);
2465 rsp_info = sts->rsp_info;
2466 sense_data = sts->req_sense_data;
2467 par_sense_len = sizeof(sts->req_sense_data);
2468 }
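/*
 * On FWI-2 capable adapters the FCP response information and any sense
 * bytes share the sts24->data[] buffer; the sense pointer and length are
 * adjusted past the response info below before the sense data is used.
 */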
2470 /* Check for any FCP transport errors. */
2471 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2472 /* Sense data lies beyond any FCP RESPONSE data. */
2473 if (IS_FWI2_CAPABLE(ha)) {
2474 sense_data += rsp_info_len;
2475 par_sense_len -= rsp_info_len;
2476 }
2477 if (rsp_info_len > 3 && rsp_info[3]) {
2478 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2479 "FCP I/O protocol failure (0x%x/0x%x).\n",
2480 rsp_info_len, rsp_info[3]);
2482 res = DID_BUS_BUSY << 16;
2483 goto out;
2484 }
2485 }
2487 /* Check for overrun. */
2488 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2489 scsi_status & SS_RESIDUAL_OVER)
2490 comp_status = CS_DATA_OVERRUN;
2492 /*
2493 * Check retry_delay_timer value if we receive a busy or
2494 * queue full.
2495 */
2496 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2497 lscsi_status == SAM_STAT_BUSY)
2498 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
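/*
 * The stored timestamp is checked on the submission path, so new
 * commands to this fcport are held off until the target-supplied
 * retry delay has expired.
 */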
2500 /*
2501 * Based on Host and scsi status generate status code for Linux
2502 */
2503 switch (comp_status) {
2504 case CS_COMPLETE:
2505 case CS_QUEUE_FULL:
2506 if (scsi_status == 0) {
2507 res = DID_OK << 16;
2508 break;
2509 }
2510 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2511 resid = resid_len;
2512 scsi_set_resid(cp, resid);
2514 if (!lscsi_status &&
2515 ((unsigned)(scsi_bufflen(cp) - resid) <
2516 cp->underflow)) {
2517 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2518 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2519 resid, scsi_bufflen(cp));
2521 res = DID_ERROR << 16;
2522 break;
2523 }
2524 }
2525 res = DID_OK << 16 | lscsi_status;
2527 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2528 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2529 "QUEUE FULL detected.\n");
2530 break;
2531 }
2532 logit = 0;
2533 if (lscsi_status != SS_CHECK_CONDITION)
2534 break;
2536 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2537 if (!(scsi_status & SS_SENSE_LEN_VALID))
2538 break;
2540 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2541 rsp, res);
2542 break;
2544 case CS_DATA_UNDERRUN:
2545 /* Use F/W calculated residual length. */
2546 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2547 scsi_set_resid(cp, resid);
2548 if (scsi_status & SS_RESIDUAL_UNDER) {
2549 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2550 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2551 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2552 resid, scsi_bufflen(cp));
2554 res = DID_ERROR << 16 | lscsi_status;
2555 goto check_scsi_status;
2556 }
2558 if (!lscsi_status &&
2559 ((unsigned)(scsi_bufflen(cp) - resid) <
2560 cp->underflow)) {
2561 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2562 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2563 resid, scsi_bufflen(cp));
2565 res = DID_ERROR << 16;
2566 break;
2567 }
2568 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2569 lscsi_status != SAM_STAT_BUSY) {
2570 /*
2571 * scsi status of task set and busy are considered to be
2572 * task not completed.
2573 */
2575 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2576 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2577 resid, scsi_bufflen(cp));
2579 res = DID_ERROR << 16 | lscsi_status;
2580 goto check_scsi_status;
2581 } else {
2582 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2583 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2584 scsi_status, lscsi_status);
2585 }
2587 res = DID_OK << 16 | lscsi_status;
2588 logit = 0;
2590 check_scsi_status:
2591 /*
2592 * Check to see if SCSI Status is non zero. If so report SCSI
2593 * Status.
2594 */
2595 if (lscsi_status != 0) {
2596 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2597 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2598 "QUEUE FULL detected.\n");
2599 logit = 1;
2600 break;
2601 }
2602 if (lscsi_status != SS_CHECK_CONDITION)
2603 break;
2605 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2606 if (!(scsi_status & SS_SENSE_LEN_VALID))
2607 break;
2609 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2610 sense_len, rsp, res);
2611 }
2612 break;
2614 case CS_PORT_LOGGED_OUT:
2615 no_logout = 1;
2616 case CS_PORT_CONFIG_CHG:
2617 case CS_PORT_BUSY:
2618 case CS_INCOMPLETE:
2619 case CS_PORT_UNAVAILABLE:
2620 case CS_TIMEOUT:
2621 case CS_RESET:
2623 /*
2624 * We are going to have the fc class block the rport
2625 * while we try to recover so instruct the mid layer
2626 * to requeue until the class decides how to handle this.
2627 */
2628 res = DID_TRANSPORT_DISRUPTED << 16;
2630 if (comp_status == CS_TIMEOUT) {
2631 if (IS_FWI2_CAPABLE(ha))
2632 break;
2633 else if ((le16_to_cpu(sts->status_flags) &
2634 SF_LOGOUT_SENT) == 0)
2635 break;
2636 }
2638 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2639 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2640 "Port to be marked lost on fcport=%02x%02x%02x, current "
2641 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
2642 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2643 port_state_str[atomic_read(&fcport->state)],
2644 comp_status);
2646 if (no_logout)
2647 fcport->logout_on_delete = 0;
2649 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2650 qlt_schedule_sess_for_deletion_lock(fcport);
2651 }
2653 break;
2655 case CS_ABORTED:
2656 res = DID_RESET << 16;
2657 break;
2659 case CS_DIF_ERROR:
2660 logit = qla2x00_handle_dif_error(sp, sts24);
2661 res = cp->result;
2662 break;
2664 case CS_TRANSPORT:
2665 res = DID_ERROR << 16;
2667 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2668 break;
2670 if (state_flags & BIT_4)
2671 scmd_printk(KERN_WARNING, cp,
2672 "Unsupported device '%s' found.\n",
2673 cp->device->vendor);
2674 break;
2676 default:
2677 res = DID_ERROR << 16;
2678 break;
2679 }
2681 out:
2682 if (logit)
2683 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2684 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2685 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2686 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2687 comp_status, scsi_status, res, vha->host_no,
2688 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2689 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2690 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2691 resid_len, fw_resid_len, sp, cp);
2693 if (rsp->status_srb == NULL)
2694 sp->done(sp, res);
2695 }
2697 /**
2698 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2699 * @rsp: response queue
2700 * @pkt: Entry pointer
2701 *
2702 * Extended sense data.
2703 */
2704 static void
2705 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2706 {
2707 uint8_t sense_sz = 0;
2708 struct qla_hw_data *ha = rsp->hw;
2709 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2710 srb_t *sp = rsp->status_srb;
2711 struct scsi_cmnd *cp;
2712 uint32_t sense_len;
2713 uint8_t *sense_ptr;
2715 if (!sp || !GET_CMD_SENSE_LEN(sp))
2716 return;
2718 sense_len = GET_CMD_SENSE_LEN(sp);
2719 sense_ptr = GET_CMD_SENSE_PTR(sp);
2721 cp = GET_CMD_SP(sp);
2722 if (cp == NULL) {
2723 ql_log(ql_log_warn, vha, 0x3025,
2724 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2726 rsp->status_srb = NULL;
2727 return;
2728 }
2730 if (sense_len > sizeof(pkt->data))
2731 sense_sz = sizeof(pkt->data);
2732 else
2733 sense_sz = sense_len;
2735 /* Move sense data. */
2736 if (IS_FWI2_CAPABLE(ha))
2737 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2738 memcpy(sense_ptr, pkt->data, sense_sz);
2739 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2740 sense_ptr, sense_sz);
2742 sense_len -= sense_sz;
2743 sense_ptr += sense_sz;
2745 SET_CMD_SENSE_PTR(sp, sense_ptr);
2746 SET_CMD_SENSE_LEN(sp, sense_len);
2748 /* Place command on done queue. */
2749 if (sense_len == 0) {
2750 rsp->status_srb = NULL;
2751 sp->done(sp, cp->result);
2752 }
2753 }
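/*
 * When sense data does not fit in the status IOCB, qla2x00_handle_sense()
 * leaves the SRB on rsp->status_srb; each status continuation entry copies
 * another chunk, and the command is completed once the remaining sense
 * length reaches zero.
 */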
2755 /**
2756 * qla2x00_error_entry() - Process an error entry.
2757 * @vha: SCSI driver HA context
2758 * @pkt: Entry pointer
2759 * Returns 1 to allow further error analysis, 0 if no additional analysis is needed.
2760 */
2761 static int
2762 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2763 {
2764 srb_t *sp;
2765 struct qla_hw_data *ha = vha->hw;
2766 const char func[] = "ERROR-IOCB";
2767 uint16_t que = MSW(pkt->handle);
2768 struct req_que *req = NULL;
2769 int res = DID_ERROR << 16;
2771 ql_dbg(ql_dbg_async, vha, 0x502a,
2772 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
2773 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
2775 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2776 goto fatal;
2778 req = ha->req_q_map[que];
2780 if (pkt->entry_status & RF_BUSY)
2781 res = DID_BUS_BUSY << 16;
2783 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
2784 return 0;
2786 switch (pkt->entry_type) {
2787 case NOTIFY_ACK_TYPE:
2788 case STATUS_TYPE:
2789 case STATUS_CONT_TYPE:
2790 case LOGINOUT_PORT_IOCB_TYPE:
2791 case CT_IOCB_TYPE:
2792 case ELS_IOCB_TYPE:
2793 case ABORT_IOCB_TYPE:
2794 case MBX_IOCB_TYPE:
2795 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2796 if (sp) {
2797 sp->done(sp, res);
2798 return 0;
2799 }
2800 break;
2802 case ABTS_RESP_24XX:
2803 case CTIO_TYPE7:
2804 case CTIO_CRC2:
2805 return 1;
2806 }
2807 fatal:
2809 ql_log(ql_log_warn, vha, 0x5030,
2810 "Error entry - invalid handle/queue (%04x).\n", que);
2811 return 0;
2812 }
2814 /**
2815 * qla24xx_mbx_completion() - Process mailbox command completions.
2816 * @vha: SCSI driver HA context
2817 * @mb0: Mailbox0 register
2818 */
2819 static void
2820 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2821 {
2822 uint16_t cnt;
2823 uint32_t mboxes;
2824 uint16_t __iomem *wptr;
2825 struct qla_hw_data *ha = vha->hw;
2826 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2828 /* Read all mbox registers? */
2829 mboxes = (1 << ha->mbx_count) - 1;
2830 if (!ha->mcp)
2831 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2832 else
2833 mboxes = ha->mcp->in_mb;
2835 /* Load return mailbox registers. */
2836 ha->flags.mbox_int = 1;
2837 ha->mailbox_out[0] = mb0;
2838 mboxes >>= 1;
2839 wptr = (uint16_t __iomem *)&reg->mailbox1;
2841 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2842 if (mboxes & BIT_0)
2843 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2845 mboxes >>= 1;
2846 wptr++;
2847 }
2848 }
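/*
 * qla24xx_abort_iocb_entry() - Process an abort IOCB completion: look up
 * the originating abort SRB by handle and complete it with the status
 * the firmware reports (returned in the nport_handle field).
 */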
2850 static void
2851 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2852 struct abort_entry_24xx *pkt)
2853 {
2854 const char func[] = "ABT_IOCB";
2855 srb_t *sp;
2856 struct srb_iocb *abt;
2858 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2859 if (!sp)
2860 return;
2862 abt = &sp->u.iocb_cmd;
2863 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2864 sp->done(sp, 0);
2865 }
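/*
 * qla24xx_nvme_ls4_iocb() - Complete an NVMe LS pass-through request
 * with the completion status reported by the firmware.
 */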
2867 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2868 struct pt_ls4_request *pkt, struct req_que *req)
2869 {
2870 srb_t *sp;
2871 const char func[] = "LS4_IOCB";
2872 uint16_t comp_status;
2874 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2875 if (!sp)
2876 return;
2878 comp_status = le16_to_cpu(pkt->status);
2879 sp->done(sp, comp_status);
2880 }
2882 /**
2883 * qla24xx_process_response_queue() - Process response queue entries.
2884 * @vha: SCSI driver HA context
2885 */
2886 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2887 struct rsp_que *rsp)
2888 {
2889 struct sts_entry_24xx *pkt;
2890 struct qla_hw_data *ha = vha->hw;
2892 if (!ha->flags.fw_started)
2893 return;
2895 if (rsp->qpair->cpuid != smp_processor_id())
2896 qla_cpu_update(rsp->qpair, smp_processor_id());
2898 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2899 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2901 rsp->ring_index++;
2902 if (rsp->ring_index == rsp->length) {
2903 rsp->ring_index = 0;
2904 rsp->ring_ptr = rsp->ring;
2905 } else {
2906 rsp->ring_ptr++;
2907 }
2909 if (pkt->entry_status != 0) {
2910 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
2911 goto process_err;
2913 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2914 wmb();
2915 continue;
2916 }
2917 process_err:
2919 switch (pkt->entry_type) {
2920 case STATUS_TYPE:
2921 qla2x00_status_entry(vha, rsp, pkt);
2922 break;
2923 case STATUS_CONT_TYPE:
2924 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2925 break;
2926 case VP_RPT_ID_IOCB_TYPE:
2927 qla24xx_report_id_acquisition(vha,
2928 (struct vp_rpt_id_entry_24xx *)pkt);
2929 break;
2930 case LOGINOUT_PORT_IOCB_TYPE:
2931 qla24xx_logio_entry(vha, rsp->req,
2932 (struct logio_entry_24xx *)pkt);
2933 break;
2934 case CT_IOCB_TYPE:
2935 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2936 break;
2937 case ELS_IOCB_TYPE:
2938 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2939 break;
2940 case ABTS_RECV_24XX:
2941 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2942 /* ensure that the ATIO queue is empty */
2943 qlt_handle_abts_recv(vha, rsp,
2944 (response_t *)pkt);
2945 break;
2946 } else {
2947 /* drop through */
2948 qlt_24xx_process_atio_queue(vha, 1);
2949 }
2950 case ABTS_RESP_24XX:
2951 case CTIO_TYPE7:
2952 case CTIO_CRC2:
2953 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
2954 break;
2955 case PT_LS4_REQUEST:
2956 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
2957 rsp->req);
2958 break;
2959 case NOTIFY_ACK_TYPE:
2960 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
2961 qlt_response_pkt_all_vps(vha, rsp,
2962 (response_t *)pkt);
2963 else
2964 qla24xxx_nack_iocb_entry(vha, rsp->req,
2965 (struct nack_to_isp *)pkt);
2966 break;
2967 case MARKER_TYPE:
2968 /* Do nothing in this case, this check is to prevent it
2969 * from falling into default case
2970 */
2971 break;
2972 case ABORT_IOCB_TYPE:
2973 qla24xx_abort_iocb_entry(vha, rsp->req,
2974 (struct abort_entry_24xx *)pkt);
2975 break;
2976 case MBX_IOCB_TYPE:
2977 qla24xx_mbx_iocb_entry(vha, rsp->req,
2978 (struct mbx_24xx_entry *)pkt);
2979 break;
2980 default:
2981 /* Type Not Supported. */
2982 ql_dbg(ql_dbg_async, vha, 0x5042,
2983 "Received unknown response pkt type %x "
2984 "entry status=%x.\n",
2985 pkt->entry_type, pkt->entry_status);
2986 break;
2987 }
2988 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2989 wmb();
2990 }
2992 /* Adjust ring index */
2993 if (IS_P3P_TYPE(ha)) {
2994 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2995 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2996 } else {
2997 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2998 }
2999 }
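/*
 * qla2xxx_check_risc_status() - With the RISC paused, select register
 * window 0x7C00 and handshake through the window register to check for
 * an additional firmware error indication (logged as 0x55AA) before the
 * firmware dump is taken.
 */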
3001 static void
3002 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3003 {
3004 int rval;
3005 uint32_t cnt;
3006 struct qla_hw_data *ha = vha->hw;
3007 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3009 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3010 !IS_QLA27XX(ha))
3011 return;
3013 rval = QLA_SUCCESS;
3014 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
3015 RD_REG_DWORD(&reg->iobase_addr);
3016 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3017 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3018 rval == QLA_SUCCESS; cnt--) {
3019 if (cnt) {
3020 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3021 udelay(10);
3022 } else
3023 rval = QLA_FUNCTION_TIMEOUT;
3024 }
3025 if (rval == QLA_SUCCESS)
3026 goto next_test;
3028 rval = QLA_SUCCESS;
3029 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3030 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3031 rval == QLA_SUCCESS; cnt--) {
3032 if (cnt) {
3033 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3034 udelay(10);
3035 } else
3036 rval = QLA_FUNCTION_TIMEOUT;
3037 }
3038 if (rval != QLA_SUCCESS)
3039 goto done;
3041 next_test:
3042 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
3043 ql_log(ql_log_info, vha, 0x504c,
3044 "Additional code -- 0x55AA.\n");
3046 done:
3047 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
3048 RD_REG_DWORD(&reg->iobase_window);
3049 }
3051 /**
3052 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
3053 * @irq: interrupt number
3054 * @dev_id: SCSI driver HA context
3055 *
3056 * Called by system whenever the host adapter generates an interrupt.
3057 *
3058 * Returns handled flag.
3059 */
3060 irqreturn_t
3061 qla24xx_intr_handler(int irq, void *dev_id)
3062 {
3063 scsi_qla_host_t *vha;
3064 struct qla_hw_data *ha;
3065 struct device_reg_24xx __iomem *reg;
3066 int status;
3067 unsigned long iter;
3068 uint32_t stat;
3069 uint32_t hccr;
3070 uint16_t mb[8];
3071 struct rsp_que *rsp;
3072 unsigned long flags;
3074 rsp = (struct rsp_que *) dev_id;
3075 if (!rsp) {
3076 ql_log(ql_log_info, NULL, 0x5059,
3077 "%s: NULL response queue pointer.\n", __func__);
3078 return IRQ_NONE;
3079 }
3081 ha = rsp->hw;
3082 reg = &ha->iobase->isp24;
3083 status = 0;
3085 if (unlikely(pci_channel_offline(ha->pdev)))
3086 return IRQ_HANDLED;
3088 spin_lock_irqsave(&ha->hardware_lock, flags);
3089 vha = pci_get_drvdata(ha->pdev);
3090 for (iter = 50; iter--; ) {
3091 stat = RD_REG_DWORD(&reg->host_status);
3092 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3093 break;
3094 if (stat & HSRX_RISC_PAUSED) {
3095 if (unlikely(pci_channel_offline(ha->pdev)))
3096 break;
3098 hccr = RD_REG_DWORD(&reg->hccr);
3100 ql_log(ql_log_warn, vha, 0x504b,
3101 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3102 hccr);
3104 qla2xxx_check_risc_status(vha);
3106 ha->isp_ops->fw_dump(vha, 1);
3107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3108 break;
3109 } else if ((stat & HSRX_RISC_INT) == 0)
3110 break;
3112 switch (stat & 0xff) {
3113 case INTR_ROM_MB_SUCCESS:
3114 case INTR_ROM_MB_FAILED:
3115 case INTR_MB_SUCCESS:
3116 case INTR_MB_FAILED:
3117 qla24xx_mbx_completion(vha, MSW(stat));
3118 status |= MBX_INTERRUPT;
3120 break;
3121 case INTR_ASYNC_EVENT:
3122 mb[0] = MSW(stat);
3123 mb[1] = RD_REG_WORD(&reg->mailbox1);
3124 mb[2] = RD_REG_WORD(&reg->mailbox2);
3125 mb[3] = RD_REG_WORD(&reg->mailbox3);
3126 qla2x00_async_event(vha, rsp, mb);
3127 break;
3128 case INTR_RSP_QUE_UPDATE:
3129 case INTR_RSP_QUE_UPDATE_83XX:
3130 qla24xx_process_response_queue(vha, rsp);
3131 break;
3132 case INTR_ATIO_QUE_UPDATE:{
3133 unsigned long flags2;
3134 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3135 qlt_24xx_process_atio_queue(vha, 1);
3136 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3137 break;
3138 }
3139 case INTR_ATIO_RSP_QUE_UPDATE: {
3140 unsigned long flags2;
3141 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3142 qlt_24xx_process_atio_queue(vha, 1);
3143 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3145 qla24xx_process_response_queue(vha, rsp);
3146 break;
3147 }
3148 default:
3149 ql_dbg(ql_dbg_async, vha, 0x504f,
3150 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3151 break;
3152 }
3153 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3154 RD_REG_DWORD_RELAXED(&reg->hccr);
3155 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3156 ndelay(3500);
3157 }
3158 qla2x00_handle_mbx_completion(ha, status);
3159 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3161 return IRQ_HANDLED;
3162 }
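/*
 * qla24xx_msix_rsp_q() - MSI-X vector for response queue updates:
 * process the response ring and, unless the MSI-X handshake is
 * disabled, clear the RISC interrupt.
 */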
3164 static irqreturn_t
3165 qla24xx_msix_rsp_q(int irq, void *dev_id)
3166 {
3167 struct qla_hw_data *ha;
3168 struct rsp_que *rsp;
3169 struct device_reg_24xx __iomem *reg;
3170 struct scsi_qla_host *vha;
3171 unsigned long flags;
3173 rsp = (struct rsp_que *) dev_id;
3174 if (!rsp) {
3175 ql_log(ql_log_info, NULL, 0x505a,
3176 "%s: NULL response queue pointer.\n", __func__);
3177 return IRQ_NONE;
3178 }
3179 ha = rsp->hw;
3180 reg = &ha->iobase->isp24;
3182 spin_lock_irqsave(&ha->hardware_lock, flags);
3184 vha = pci_get_drvdata(ha->pdev);
3185 qla24xx_process_response_queue(vha, rsp);
3186 if (!ha->flags.disable_msix_handshake) {
3187 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3188 RD_REG_DWORD_RELAXED(&reg->hccr);
3189 }
3190 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3192 return IRQ_HANDLED;
3193 }
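/*
 * qla24xx_msix_default() - Default MSI-X vector: services mailbox
 * completions, asynchronous events and response queue updates. Unlike
 * the INTx handler it makes a single pass rather than iterating.
 */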
3195 static irqreturn_t
3196 qla24xx_msix_default(int irq, void *dev_id)
3197 {
3198 scsi_qla_host_t *vha;
3199 struct qla_hw_data *ha;
3200 struct rsp_que *rsp;
3201 struct device_reg_24xx __iomem *reg;
3202 int status;
3203 uint32_t stat;
3204 uint32_t hccr;
3205 uint16_t mb[8];
3206 unsigned long flags;
3208 rsp = (struct rsp_que *) dev_id;
3209 if (!rsp) {
3210 ql_log(ql_log_info, NULL, 0x505c,
3211 "%s: NULL response queue pointer.\n", __func__);
3212 return IRQ_NONE;
3213 }
3214 ha = rsp->hw;
3215 reg = &ha->iobase->isp24;
3216 status = 0;
3218 spin_lock_irqsave(&ha->hardware_lock, flags);
3219 vha = pci_get_drvdata(ha->pdev);
3220 do {
3221 stat = RD_REG_DWORD(&reg->host_status);
3222 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3223 break;
3224 if (stat & HSRX_RISC_PAUSED) {
3225 if (unlikely(pci_channel_offline(ha->pdev)))
3226 break;
3228 hccr = RD_REG_DWORD(&reg->hccr);
3230 ql_log(ql_log_info, vha, 0x5050,
3231 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3232 hccr);
3234 qla2xxx_check_risc_status(vha);
3236 ha->isp_ops->fw_dump(vha, 1);
3237 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3238 break;
3239 } else if ((stat & HSRX_RISC_INT) == 0)
3240 break;
3242 switch (stat & 0xff) {
3243 case INTR_ROM_MB_SUCCESS:
3244 case INTR_ROM_MB_FAILED:
3245 case INTR_MB_SUCCESS:
3246 case INTR_MB_FAILED:
3247 qla24xx_mbx_completion(vha, MSW(stat));
3248 status |= MBX_INTERRUPT;
3250 break;
3251 case INTR_ASYNC_EVENT:
3252 mb[0] = MSW(stat);
3253 mb[1] = RD_REG_WORD(&reg->mailbox1);
3254 mb[2] = RD_REG_WORD(&reg->mailbox2);
3255 mb[3] = RD_REG_WORD(&reg->mailbox3);
3256 qla2x00_async_event(vha, rsp, mb);
3257 break;
3258 case INTR_RSP_QUE_UPDATE:
3259 case INTR_RSP_QUE_UPDATE_83XX:
3260 qla24xx_process_response_queue(vha, rsp);
3261 break;
3262 case INTR_ATIO_QUE_UPDATE:{
3263 unsigned long flags2;
3264 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3265 qlt_24xx_process_atio_queue(vha, 1);
3266 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3267 break;
3268 }
3269 case INTR_ATIO_RSP_QUE_UPDATE: {
3270 unsigned long flags2;
3271 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3272 qlt_24xx_process_atio_queue(vha, 1);
3273 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3275 qla24xx_process_response_queue(vha, rsp);
3276 break;
3277 }
3278 default:
3279 ql_dbg(ql_dbg_async, vha, 0x5051,
3280 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3281 break;
3282 }
3283 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3284 } while (0);
3285 qla2x00_handle_mbx_completion(ha, status);
3286 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3288 return IRQ_HANDLED;
3289 }
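/*
 * qla2xxx_msix_rsp_q() - Per-queue-pair MSI-X vector: clear the RISC
 * interrupt when handshaking is enabled, then defer ring processing to
 * the qpair's work item on ha->wq.
 */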
3291 static irqreturn_t
3292 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3293 {
3294 struct qla_hw_data *ha;
3295 struct qla_qpair *qpair;
3296 struct device_reg_24xx __iomem *reg;
3297 unsigned long flags;
3299 qpair = dev_id;
3300 if (!qpair) {
3301 ql_log(ql_log_info, NULL, 0x505b,
3302 "%s: NULL response queue pointer.\n", __func__);
3303 return IRQ_NONE;
3304 }
3305 ha = qpair->hw;
3307 /* Clear the interrupt, if enabled, for this response queue */
3308 if (unlikely(!ha->flags.disable_msix_handshake)) {
3309 reg = &ha->iobase->isp24;
3310 spin_lock_irqsave(&ha->hardware_lock, flags);
3311 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3312 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3313 }
3315 queue_work(ha->wq, &qpair->q_work);
3317 return IRQ_HANDLED;
3318 }
3320 /* Interrupt handling helpers. */
3322 struct qla_init_msix_entry {
3323 const char *name;
3324 irq_handler_t handler;
3325 };
3327 static const struct qla_init_msix_entry msix_entries[] = {
3328 { "default", qla24xx_msix_default },
3329 { "rsp_q", qla24xx_msix_rsp_q },
3330 { "atio_q", qla83xx_msix_atio_q },
3331 { "qpair_multiq", qla2xxx_msix_rsp_q },
3332 };
3334 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3335 { "qla2xxx (default)", qla82xx_msix_default },
3336 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3337 };
3339 static int
3340 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3341 {
3342 int i, ret;
3343 struct qla_msix_entry *qentry;
3344 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3345 int min_vecs = QLA_BASE_VECTORS;
3346 struct irq_affinity desc = {
3347 .pre_vectors = QLA_BASE_VECTORS,
3348 };
3350 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3351 desc.pre_vectors++;
3352 min_vecs++;
3353 }
3355 if (USER_CTRL_IRQ(ha)) {
3356 /* user wants to control IRQ setting for target mode */
3357 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3358 ha->msix_count, PCI_IRQ_MSIX);
3359 } else
3360 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3361 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3362 &desc);
3364 if (ret < 0) {
3365 ql_log(ql_log_fatal, vha, 0x00c7,
3366 "MSI-X: Failed to enable support, "
3367 "giving up -- %d/%d.\n",
3368 ha->msix_count, ret);
3369 goto msix_out;
3370 } else if (ret < ha->msix_count) {
3371 ql_log(ql_log_warn, vha, 0x00c6,
3372 "MSI-X: Failed to enable support "
3373 "with %d vectors, using %d vectors.\n",
3374 ha->msix_count, ret);
3375 ha->msix_count = ret;
3376 /* Recalculate queue values */
3377 if (ha->mqiobase && ql2xmqsupport) {
3378 ha->max_req_queues = ha->msix_count - 1;
3380 /* ATIOQ needs 1 vector. That's 1 less QPair */
3381 if (QLA_TGT_MODE_ENABLED())
3382 ha->max_req_queues--;
3384 ha->max_rsp_queues = ha->max_req_queues;
3386 ha->max_qpairs = ha->max_req_queues - 1;
3387 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3388 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3389 }
3390 }
3391 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3392 ha->msix_count, GFP_KERNEL);
3393 if (!ha->msix_entries) {
3394 ql_log(ql_log_fatal, vha, 0x00c8,
3395 "Failed to allocate memory for ha->msix_entries.\n");
3396 ret = -ENOMEM;
3397 goto msix_out;
3398 }
3399 ha->flags.msix_enabled = 1;
3401 for (i = 0; i < ha->msix_count; i++) {
3402 qentry = &ha->msix_entries[i];
3403 qentry->vector = pci_irq_vector(ha->pdev, i);
3404 qentry->entry = i;
3405 qentry->have_irq = 0;
3406 qentry->in_use = 0;
3407 qentry->handle = NULL;
3408 }
3410 /* Enable MSI-X vectors for the base queue */
3411 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3412 qentry = &ha->msix_entries[i];
3413 qentry->handle = rsp;
3414 rsp->msix = qentry;
3415 scnprintf(qentry->name, sizeof(qentry->name),
3416 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3417 if (IS_P3P_TYPE(ha))
3418 ret = request_irq(qentry->vector,
3419 qla82xx_msix_entries[i].handler,
3420 0, qla82xx_msix_entries[i].name, rsp);
3421 else
3422 ret = request_irq(qentry->vector,
3423 msix_entries[i].handler,
3424 0, qentry->name, rsp);
3425 if (ret)
3426 goto msix_register_fail;
3427 qentry->have_irq = 1;
3428 qentry->in_use = 1;
3429 }
3431 /*
3432 * If target mode is enabled, also request the vector for the ATIO
3433 * queue.
3434 */
3435 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3436 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3437 rsp->msix = qentry;
3438 qentry->handle = rsp;
3439 scnprintf(qentry->name, sizeof(qentry->name),
3440 "qla2xxx%lu_%s", vha->host_no,
3441 msix_entries[QLA_ATIO_VECTOR].name);
3442 qentry->in_use = 1;
3443 ret = request_irq(qentry->vector,
3444 msix_entries[QLA_ATIO_VECTOR].handler,
3445 0, qentry->name, rsp);
3446 qentry->have_irq = 1;
3447 }
3449 msix_register_fail:
3450 if (ret) {
3451 ql_log(ql_log_fatal, vha, 0x00cb,
3452 "MSI-X: unable to register handler -- %x/%d.\n",
3453 qentry->vector, ret);
3454 qla2x00_free_irqs(vha);
3455 ha->mqenable = 0;
3456 goto msix_out;
3457 }
3459 /* Enable MSI-X vector for response queue update for queue 0 */
3460 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3461 if (ha->msixbase && ha->mqiobase &&
3462 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3463 ql2xmqsupport))
3464 ha->mqenable = 1;
3465 } else
3466 if (ha->mqiobase &&
3467 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3468 ql2xmqsupport))
3469 ha->mqenable = 1;
3470 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3471 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3472 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3473 ql_dbg(ql_dbg_init, vha, 0x0055,
3474 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3475 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3477 msix_out:
3478 return ret;
3479 }
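/*
 * qla2x00_request_irqs() - Interrupt setup policy: try MSI-X first,
 * fall back to single-message MSI on adapters with known MSI-X quirks,
 * and finally to a shared INTx line. On adapters that are not FWI-2
 * capable the mailbox semaphore register is cleared afterwards.
 */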
3481 int
3482 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3483 {
3484 int ret = QLA_FUNCTION_FAILED;
3485 device_reg_t *reg = ha->iobase;
3486 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3488 /* If possible, enable MSI-X. */
3489 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3490 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
3491 !IS_QLA27XX(ha))
3492 goto skip_msi;
3494 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3495 (ha->pdev->subsystem_device == 0x7040 ||
3496 ha->pdev->subsystem_device == 0x7041 ||
3497 ha->pdev->subsystem_device == 0x1705)) {
3498 ql_log(ql_log_warn, vha, 0x0034,
3499 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3500 ha->pdev->subsystem_vendor,
3501 ha->pdev->subsystem_device);
3502 goto skip_msi;
3503 }
3505 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3506 ql_log(ql_log_warn, vha, 0x0035,
3507 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3508 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3509 goto skip_msix;
3510 }
3512 ret = qla24xx_enable_msix(ha, rsp);
3513 if (!ret) {
3514 ql_dbg(ql_dbg_init, vha, 0x0036,
3515 "MSI-X: Enabled (0x%X, 0x%X).\n",
3516 ha->chip_revision, ha->fw_attributes);
3517 goto clear_risc_ints;
3518 }
3520 skip_msix:
3522 ql_log(ql_log_info, vha, 0x0037,
3523 "Falling back to MSI mode -- %d.\n", ret);
3525 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3526 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3527 !IS_QLA27XX(ha))
3528 goto skip_msi;
3530 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3531 if (!ret) {
3532 ql_dbg(ql_dbg_init, vha, 0x0038,
3533 "MSI: Enabled.\n");
3534 ha->flags.msi_enabled = 1;
3535 } else
3536 ql_log(ql_log_warn, vha, 0x0039,
3537 "Falling back to INTa mode -- %d.\n", ret);
3538 skip_msi:
3540 /* Skip INTx on ISP82xx. */
3541 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3542 return QLA_FUNCTION_FAILED;
3544 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3545 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3546 QLA2XXX_DRIVER_NAME, rsp);
3547 if (ret) {
3548 ql_log(ql_log_warn, vha, 0x003a,
3549 "Failed to reserve interrupt %d already in use.\n",
3550 ha->pdev->irq);
3551 goto fail;
3552 } else if (!ha->flags.msi_enabled) {
3553 ql_dbg(ql_dbg_init, vha, 0x0125,
3554 "INTa mode: Enabled.\n");
3555 ha->flags.mr_intr_valid = 1;
3556 }
3558 clear_risc_ints:
3559 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3560 goto fail;
3562 spin_lock_irq(&ha->hardware_lock);
3563 WRT_REG_WORD(&reg->isp.semaphore, 0);
3564 spin_unlock_irq(&ha->hardware_lock);
3566 fail:
3567 return ret;
3568 }
3570 void
3571 qla2x00_free_irqs(scsi_qla_host_t *vha)
3572 {
3573 struct qla_hw_data *ha = vha->hw;
3574 struct rsp_que *rsp;
3575 struct qla_msix_entry *qentry;
3576 int i;
3578 /*
3579 * We need to check that ha->rsp_q_map is valid in case we are called
3580 * from a probe failure context.
3581 */
3582 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3583 goto free_irqs;
3584 rsp = ha->rsp_q_map[0];
3586 if (ha->flags.msix_enabled) {
3587 for (i = 0; i < ha->msix_count; i++) {
3588 qentry = &ha->msix_entries[i];
3589 if (qentry->have_irq) {
3590 irq_set_affinity_notifier(qentry->vector, NULL);
3591 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3592 }
3593 }
3594 kfree(ha->msix_entries);
3595 ha->msix_entries = NULL;
3596 ha->flags.msix_enabled = 0;
3597 ql_dbg(ql_dbg_init, vha, 0x0042,
3598 "Disabled MSI-X.\n");
3599 } else {
3600 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3601 }
3603 free_irqs:
3604 pci_free_irq_vectors(ha->pdev);
3605 }
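/*
 * qla25xx_request_irq() - Request the MSI-X vector for a queue pair;
 * vector_type indexes msix_entries[] to select the handler that will
 * service the qpair.
 */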
3607 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3608 struct qla_msix_entry *msix, int vector_type)
3609 {
3610 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3611 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3612 int ret;
3614 scnprintf(msix->name, sizeof(msix->name),
3615 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3616 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3617 if (ret) {
3618 ql_log(ql_log_fatal, vha, 0x00e6,
3619 "MSI-X: Unable to register handler -- %x/%d.\n",
3620 msix->vector, ret);
3621 return ret;
3622 }
3623 msix->have_irq = 1;
3624 msix->handle = qpair;
3625 return ret;
3626 }