/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
    sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
    const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505d,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        hccr = RD_REG_WORD(&reg->hccr);
        if (qla2x00_check_reg16_for_disconnect(vha, hccr))
            break;
        if (hccr & HCCR_RISC_PAUSE) {
            if (pci_channel_offline(ha->pdev))
                break;

            /*
             * Issue a "HARD" reset in order for the RISC interrupt
             * bit to be cleared. Schedule a big hammer to get
             * out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
            break;

        if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);

            /* Get mailbox data. */
            mb[0] = RD_MAILBOX_REG(ha, reg, 0);
            if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                qla2x00_mbx_completion(vha, mb[0]);
                status |= MBX_INTERRUPT;
            } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                qla2x00_async_event(vha, rsp, mb);
            } else {
                /*EMPTY*/
                ql_dbg(ql_dbg_async, vha, 0x5025,
                    "Unrecognized interrupt type (%d).\n",
                    mb[0]);
            }
            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            RD_REG_WORD(&reg->semaphore);
        } else {
            qla2x00_process_response_queue(rsp);

            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);
        }
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
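/*
 * A note on the handler above: each invocation polls for at most 50
 * interrupt conditions, so a wedged RISC cannot spin the CPU forever
 * with the hardware_lock held.  The semaphore register arbitrates the
 * mailbox interface -- BIT_0 set means the RISC has posted mailbox or
 * asynchronous-event data, clear means only response-queue work is
 * pending.
 */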
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
    /* Check for PCI disconnection */
    if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
        if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
            !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
            !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
            /*
             * Schedule this (only once) on the default system
             * workqueue so that all the adapter workqueues and the
             * DPC thread can be shutdown cleanly.
             */
            schedule_work(&vha->hw->board_disable);
        }
        return true;
    } else
        return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
    return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
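/*
 * A note on the two helpers above: a PCI device that has been
 * surprise-removed (or has fallen off the bus) returns all ones for
 * every MMIO read, so comparing a register value against 0xffffffff
 * doubles as a cheap disconnect probe in the interrupt paths.  The
 * 16-bit variant widens the value with 0xffff0000 so the same 32-bit
 * comparison applies to 16-bit registers.
 */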
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    struct qla_hw_data *ha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x5058,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
        if (qla2x00_check_reg32_for_disconnect(vha, stat))
            break;
        if (stat & HSR_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_WORD(&reg->hccr);

            if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                ql_log(ql_log_warn, vha, 0x5026,
                    "Parity error -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);
            else
                ql_log(ql_log_warn, vha, 0x5027,
                    "RISC paused -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);

            /*
             * Issue a "HARD" reset in order for the RISC
             * interrupt bit to be cleared. Schedule a big
             * hammer to get out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSR_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla2x00_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_MAILBOX_REG(ha, reg, 1);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            mb[3] = RD_MAILBOX_REG(ha, reg, 3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
            qla2x00_process_response_queue(rsp);
            break;
        case 0x15:
            mb[0] = MBA_CMPLT_1_16BIT;
            mb[1] = MSW(stat);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x16:
            mb[0] = MBA_SCSI_COMPLETION;
            mb[1] = MSW(stat);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            qla2x00_async_event(vha, rsp, mb);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x5028,
                "Unrecognized interrupt type (%d).\n", stat & 0xff);
            break;
        }
        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
        RD_REG_WORD_RELAXED(&reg->hccr);
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
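/*
 * For the ISP23xx the low byte of host_status encodes the completion
 * type dispatched by the switch above: 0x1/0x2/0x10/0x11 are mailbox
 * command completions, 0x12 is an asynchronous event, 0x13 means
 * response-queue work, and 0x15/0x16 are fast-post completions for
 * which the driver synthesizes mb[0] itself.  MSW(stat) carries
 * mailbox 0, so the common cases need no extra register read.
 */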
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint32_t mboxes;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    /* Read all mbox registers? */
    mboxes = (1 << ha->mbx_count) - 1;
    if (!ha->mcp)
        ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
    else
        mboxes = ha->mcp->in_mb;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    mboxes >>= 1;
    wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (IS_QLA2200(ha) && cnt == 8)
            wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
        if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
            ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
        else if (mboxes & BIT_0)
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

        wptr++;
        mboxes >>= 1;
    }
}
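/*
 * Mailboxes 4 and 5 are read above via qla2x00_debounce_register(),
 * which re-reads the register until two consecutive reads agree; on
 * these older ISPs those registers may be caught mid-update.  The
 * ISP2200 also maps mailbox 8 at a discontiguous offset, hence the
 * wptr rebase at cnt == 8.
 */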
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
    static char *event[] =
        { "Complete", "Request Notification", "Time Extension" };
    int rval;
    struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
    uint16_t __iomem *wptr;
    uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

    /* Seed data -- mailbox1 -> mailbox7. */
    if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
        wptr = (uint16_t __iomem *)&reg24->mailbox1;
    else if (IS_QLA8044(vha->hw))
        wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
    else
        return;

    for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
        mb[cnt] = RD_REG_WORD(wptr);

    ql_dbg(ql_dbg_async, vha, 0x5021,
        "Inter-Driver Communication %s -- "
        "%04x %04x %04x %04x %04x %04x %04x.\n",
        event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
        mb[4], mb[5], mb[6]);
    switch (aen) {
    /* Handle IDC Error completion case. */
    case MBA_IDC_COMPLETE:
        if (mb[1] >> 15) {
            vha->hw->flags.idc_compl_status = 1;
            if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
                complete(&vha->hw->dcbx_comp);
        }
        break;

    case MBA_IDC_NOTIFY:
        /* Acknowledgement needed? [Notify && non-zero timeout]. */
        timeout = (descr >> 8) & 0xf;
        ql_dbg(ql_dbg_async, vha, 0x5022,
            "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
            vha->host_no, event[aen & 0xff], timeout);

        if (!timeout)
            return;
        rval = qla2x00_post_idc_ack_work(vha, mb);
        if (rval != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x5023,
                "IDC failed to post ACK.\n");
        break;
    case MBA_IDC_TIME_EXT:
        vha->hw->idc_extend_tmo = descr;
        ql_dbg(ql_dbg_async, vha, 0x5087,
            "%lu Inter-Driver Communication %s -- "
            "Extend timeout by=%d.\n",
            vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
        break;
    }
}
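/*
 * The ACK timeout travels in bits 8-11 of the IDC descriptor, e.g. a
 * (hypothetical) descr of 0x0a00 yields timeout = (0x0a00 >> 8) & 0xf
 * = 10.  A zero timeout means the peer does not expect an
 * acknowledgement, so no IDC ACK work is posted.
 */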
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
    static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "32", "10"
    };
#define QLA_LAST_SPEED 7

    if (IS_QLA2100(ha) || IS_QLA2200(ha))
        return link_speeds[0];
    else if (speed == 0x13)
        return link_speeds[QLA_LAST_SPEED];
    else if (speed < QLA_LAST_SPEED)
        return link_speeds[speed];
    else
        return link_speeds[LS_UNKNOWN];
}
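/*
 * Example: the table above is indexed by firmware speed code, so
 * mb[1] = 0x5 on a loop-up event prints as "16" Gbps, while the CNA
 * code 0x13 maps to the last entry, "10".  ISP21xx/22xx parts are
 * fixed at 1 Gbps, and anything unrecognized prints as "?"
 * (LS_UNKNOWN).
 */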
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
    struct qla_hw_data *ha = vha->hw;

    /*
     * 8200 AEN Interpretation:
     * mb[0] = AEN code
     * mb[1] = AEN Reason code
     * mb[2] = LSW of Peg-Halt Status-1 Register
     * mb[6] = MSW of Peg-Halt Status-1 Register
     * mb[3] = LSW of Peg-Halt Status-2 register
     * mb[7] = MSW of Peg-Halt Status-2 register
     * mb[4] = IDC Device-State Register value
     * mb[5] = IDC Driver-Presence Register value
     */
    ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
        "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
        mb[0], mb[1], mb[2], mb[6]);
    ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
        "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
        "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

    if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
        IDC_HEARTBEAT_FAILURE)) {
        ha->flags.nic_core_hung = 1;
        ql_log(ql_log_warn, vha, 0x5060,
            "83XX: F/W Error Reported: Check if reset required.\n");

        if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
            uint32_t protocol_engine_id, fw_err_code, err_level;

            /*
             * IDC_PEG_HALT_STATUS_CHANGE interpretation:
             * - PEG-Halt Status-1 Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = protocol-engine ID
             *   Bits 8-28  = f/w error code
             *   Bits 29-31 = Error-level
             *     Error-level 0x1 = Non-Fatal error
             *     Error-level 0x2 = Recoverable Fatal error
             *     Error-level 0x4 = UnRecoverable Fatal error
             * - PEG-Halt Status-2 Register:
             *   (LSW = mb[3], MSW = mb[7])
             */
            protocol_engine_id = (mb[2] & 0xff);
            fw_err_code = (((mb[2] & 0xff00) >> 8) |
                ((mb[6] & 0x1fff) << 8));
            err_level = ((mb[6] & 0xe000) >> 13);
            ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
                "Register: protocol_engine_id=0x%x "
                "fw_err_code=0x%x err_level=0x%x.\n",
                protocol_engine_id, fw_err_code, err_level);
            ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
                "Register: 0x%x%x.\n", mb[7], mb[3]);
            if (err_level == ERR_LEVEL_NON_FATAL) {
                ql_log(ql_log_warn, vha, 0x5063,
                    "Not a fatal error, f/w has recovered "
                    "itself.\n");
            } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5064,
                    "Recoverable Fatal error: Chip reset "
                    "required.\n");
                qla83xx_schedule_work(vha,
                    QLA83XX_NIC_CORE_RESET);
            } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5065,
                    "Unrecoverable Fatal error: Set FAILED "
                    "state, reboot required.\n");
                qla83xx_schedule_work(vha,
                    QLA83XX_NIC_CORE_UNRECOVERABLE);
            }
        }

        if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
            uint16_t peg_fw_state, nw_interface_link_up;
            uint16_t nw_interface_signal_detect, sfp_status;
            uint16_t htbt_counter, htbt_monitor_enable;
            uint16_t sfp_additional_info, sfp_multirate;
            uint16_t sfp_tx_fault, link_speed, dcbx_status;

            /*
             * IDC_NIC_FW_REPORTED_FAILURE interpretation:
             * - PEG-to-FC Status Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = Peg-Firmware state
             *   Bit  8     = N/W Interface Link-up
             *   Bit  9     = N/W Interface signal detected
             *   Bits 10-11 = SFP Status
             *     SFP Status 0x0 = SFP+ transceiver not expected
             *     SFP Status 0x1 = SFP+ transceiver not present
             *     SFP Status 0x2 = SFP+ transceiver invalid
             *     SFP Status 0x3 = SFP+ transceiver present and
             *     valid
             *   Bits 12-14 = Heartbeat Counter
             *   Bit  15    = Heartbeat Monitor Enable
             *   Bits 16-17 = SFP Additional Info
             *     SFP info 0x0 = Unrecognized transceiver for
             *     Ethernet
             *     SFP info 0x1 = SFP+ brand validation failed
             *     SFP info 0x2 = SFP+ speed validation failed
             *     SFP info 0x3 = SFP+ access error
             *   Bit  18    = SFP Multirate
             *   Bit  19    = SFP Tx Fault
             *   Bits 20-22 = Link Speed
             *   Bits 23-27 = Reserved
             *   Bits 28-30 = DCBX Status
             *     DCBX Status 0x0 = DCBX Disabled
             *     DCBX Status 0x1 = DCBX Enabled
             *     DCBX Status 0x2 = DCBX Exchange error
             *   Bit  31    = Reserved
             */
            peg_fw_state = (mb[2] & 0x00ff);
            nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
            nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
            sfp_status = ((mb[2] & 0x0c00) >> 10);
            htbt_counter = ((mb[2] & 0x7000) >> 12);
            htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
            sfp_additional_info = (mb[6] & 0x0003);
            sfp_multirate = ((mb[6] & 0x0004) >> 2);
            sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
            link_speed = ((mb[6] & 0x0070) >> 4);
            dcbx_status = ((mb[6] & 0x7000) >> 12);

            ql_log(ql_log_warn, vha, 0x5066,
                "Peg-to-Fc Status Register:\n"
                "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
                "nw_interface_signal_detect=0x%x"
                "\nsfp_status=0x%x.\n ", peg_fw_state,
                nw_interface_link_up, nw_interface_signal_detect,
                sfp_status);
            ql_log(ql_log_warn, vha, 0x5067,
                "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
                "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
                htbt_counter, htbt_monitor_enable,
                sfp_additional_info, sfp_multirate);
            ql_log(ql_log_warn, vha, 0x5068,
                "sfp_tx_fault=0x%x, link_speed=0x%x, "
                "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
                dcbx_status);

            qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
        }

        if (mb[1] & IDC_HEARTBEAT_FAILURE) {
            ql_log(ql_log_warn, vha, 0x5069,
                "Heartbeat Failure encountered, chip reset "
                "required.\n");

            qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
        }
    }

    if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
        ql_log(ql_log_info, vha, 0x506a,
            "IDC Device-State changed = 0x%x.\n", mb[4]);
        if (ha->flags.nic_core_reset_owner)
            return;
        qla83xx_schedule_work(vha, MBA_IDC_AEN);
    }
}
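/*
 * Worked example of the Peg-Halt Status-1 decode above, using
 * hypothetical mailbox values: with mb[2] = 0x2534 and mb[6] = 0x4012,
 * protocol_engine_id = 0x34, fw_err_code = (0x25 | (0x0012 << 8)) =
 * 0x1225, and err_level = (0x4000 >> 13) = 0x2, i.e. a Recoverable
 * Fatal error that schedules QLA83XX_NIC_CORE_RESET.
 */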
static bool
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *vp;
    uint32_t vp_did;
    unsigned long flags;
    int ret = 0;

    if (!ha->num_vhosts)
        return ret;

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry(vp, &ha->vp_list, list) {
        vp_did = vp->d_id.b24;
        if (vp_did == rscn_entry) {
            ret = 1;
            break;
        }
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    return ret;
}

static inline fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
    fc_port_t *fcport;

    list_for_each_entry(fcport, &vha->vp_fcports, list)
        if (fcport->loop_id == loop_id)
            return fcport;
    return NULL;
}
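/*
 * The two lookups above serve the AEN paths below:
 * qla2x00_is_a_vp_did() lets the RSCN handler skip notifications whose
 * affected port ID belongs to one of this HBA's own virtual ports, and
 * qla2x00_find_fcport_by_loopid() resolves the N_Port handle carried
 * in a port-update mailbox to the matching session, or NULL if none.
 */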
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
    uint16_t handle_cnt;
    uint16_t cnt, mbx;
    uint32_t handles[5];
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
    uint32_t rscn_entry, host_pid;
    unsigned long flags;
    fc_port_t *fcport = NULL;

    /* Setup to process RIO completion. */
    handle_cnt = 0;
    if (IS_CNA_CAPABLE(ha))
        goto skip_rio;
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handle_cnt = 1;
        break;
    case MBA_CMPLT_1_16BIT:
        handles[0] = mb[1];
        handle_cnt = 1;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_3_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handle_cnt = 3;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_4_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handle_cnt = 4;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_5_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
        handle_cnt = 5;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_32BIT:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handles[1] = le32_to_cpu(
            ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
            RD_MAILBOX_REG(ha, reg, 6));
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    default:
        break;
    }
skip_rio:
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:	/* Fast Post */
        if (!vha->flags.online)
            break;

        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                handles[cnt]);
        break;

    case MBA_RESET:			/* Reset */
        ql_dbg(ql_dbg_async, vha, 0x5002,
            "Asynchronous RESET.\n");

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
        break;

    case MBA_SYSTEM_ERR:		/* System Error */
        mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
            RD_REG_WORD(&reg24->mailbox7) : 0;
        ql_log(ql_log_warn, vha, 0x5003,
            "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
            "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

        ha->isp_ops->fw_dump(vha, 1);

        if (IS_FWI2_CAPABLE(ha)) {
            if (mb[1] == 0 && mb[2] == 0) {
                ql_log(ql_log_fatal, vha, 0x5004,
                    "Unrecoverable Hardware Error: adapter "
                    "marked OFFLINE!\n");
                vha->flags.online = 0;
                vha->device_flags |= DFLG_DEV_FAILED;
            } else {
                /* Check to see if MPI timeout occurred */
                if ((mbx & MBX_3) && (ha->port_no == 0))
                    set_bit(MPI_RESET_NEEDED,
                        &vha->dpc_flags);

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            }
        } else if (mb[1] == 0) {
            ql_log(ql_log_fatal, vha, 0x5005,
                "Unrecoverable Hardware Error: adapter marked "
                "OFFLINE!\n");
            vha->flags.online = 0;
            vha->device_flags |= DFLG_DEV_FAILED;
        } else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
        ql_log(ql_log_warn, vha, 0x5006,
            "ISP Request Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
        ql_log(ql_log_warn, vha, 0x5007,
            "ISP Response Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
        ql_dbg(ql_dbg_async, vha, 0x5008,
            "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
        break;

    case MBA_LOOP_INIT_ERR:
        ql_log(ql_log_warn, vha, 0x5090,
            "LOOP INIT ERROR (%x).\n", mb[1]);
        ha->isp_ops->fw_dump(vha, 1);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
        ql_dbg(ql_dbg_async, vha, 0x5009,
            "LIP occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
        break;

    case MBA_LOOP_UP:		/* Loop Up Event */
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
            ha->link_data_rate = PORT_SPEED_1GB;
        else
            ha->link_data_rate = mb[1];

        ql_log(ql_log_info, vha, 0x500a,
            "LOOP UP detected (%s Gbps).\n",
            qla2x00_get_link_speed_str(ha, ha->link_data_rate));

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
        break;

    case MBA_LOOP_DOWN:		/* Loop Down Event */
        mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
            ? RD_REG_WORD(&reg24->mailbox4) : 0;
        mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
            : mbx;
        ql_log(ql_log_info, vha, 0x500b,
            "LOOP DOWN detected (%x %x %x %x).\n",
            mb[1], mb[2], mb[3], mbx);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            /*
             * In case of loop down, restore WWPN from
             * NVRAM in case of FA-WWPN capable ISP
             * Restore for Physical Port only
             */
            if (!vha->vp_idx) {
                if (ha->flags.fawwpn_enabled) {
                    void *wwpn = ha->init_cb->port_name;
                    memcpy(vha->port_name, wwpn, WWN_SIZE);
                    fc_host_port_name(vha->host) =
                        wwn_to_u64(vha->port_name);
                    ql_dbg(ql_dbg_init + ql_dbg_verbose,
                        vha, 0x0144, "LOOP DOWN detected,"
                        "restore WWPN %016llx\n",
                        wwn_to_u64(vha->port_name));
                }

                clear_bit(VP_CONFIG_OK, &vha->vp_flags);
            }

            vha->device_flags |= DFLG_NO_CABLE;
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        vha->flags.management_server_logged_in = 0;
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
        break;

    case MBA_LIP_RESET:		/* LIP reset occurred */
        ql_dbg(ql_dbg_async, vha, 0x500c,
            "LIP reset occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        ha->operating_mode = LOOP;
        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
        break;

    /* case MBA_DCBX_COMPLETE: */
    case MBA_POINT_TO_POINT:	/* Point-to-Point */
        if (IS_QLA2100(ha))
            break;

        if (IS_CNA_CAPABLE(ha)) {
            ql_dbg(ql_dbg_async, vha, 0x500d,
                "DCBX Completed -- %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
            if (ha->notify_dcbx_comp && !vha->vp_idx)
                complete(&ha->dcbx_comp);

        } else
            ql_dbg(ql_dbg_async, vha, 0x500e,
                "Asynchronous P2P MODE received.\n");

        /*
         * Until there's a transition from loop down to loop up, treat
         * this as loop down only.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
            set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        ha->flags.gpsc_supported = 1;
        vha->flags.management_server_logged_in = 0;
        break;

    case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
        if (IS_QLA2100(ha))
            break;

        ql_dbg(ql_dbg_async, vha, 0x500f,
            "Configuration change detected: value=%x.\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_PORT_UPDATE:		/* Port database update */
        /*
         * Handle only global and vn-port update events
         *
         * Relevant inputs:
         * mb[1] = N_Port handle of changed port
         *         OR 0xffff for global event
         * mb[2] = New login state
         *         7 = Port logged out
         * mb[3] = LSB is vp_idx, 0xff = all vps
         *
         * Skip processing if:
         *       Event is global, vp_idx is NOT all vps,
         *           vp_idx does not match
         *       Event is not global, vp_idx does not match
         */
        if (IS_QLA2XXX_MIDTYPE(ha) &&
            ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
            (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
            break;

        if (mb[2] == 0x7) {
            ql_dbg(ql_dbg_async, vha, 0x5010,
                "Port %s %04x %04x %04x.\n",
                mb[1] == 0xffff ? "unavailable" : "logout",
                mb[1], mb[2], mb[3]);

            if (mb[1] == 0xffff)
                goto global_port_update;

            /* Port logout */
            fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
            if (!fcport)
                break;
            if (atomic_read(&fcport->state) != FCS_ONLINE)
                break;
            ql_dbg(ql_dbg_async, vha, 0x508a,
                "Marking port lost loopid=%04x portid=%06x.\n",
                fcport->loop_id, fcport->d_id.b24);
            qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
            break;

global_port_update:
            if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
                vha->device_flags |= DFLG_NO_CABLE;
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            if (vha->vp_idx) {
                atomic_set(&vha->vp_state, VP_FAILED);
                fc_vport_set_state(vha->fc_vport,
                    FC_VPORT_FAILED);
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            vha->flags.management_server_logged_in = 0;
            ha->link_data_rate = PORT_SPEED_UNKNOWN;
            break;
        }

        /*
         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
         * event etc. earlier indicating loop is down) then process
         * it. Otherwise ignore it and wait for RSCN to come in.
         */
        atomic_set(&vha->loop_down_timer, 0);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
            atomic_read(&vha->loop_state) != LOOP_DEAD) {
            ql_dbg(ql_dbg_async, vha, 0x5011,
                "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                mb[1], mb[2], mb[3]);

            qlt_async_event(mb[0], vha, mb);
            break;
        }

        ql_dbg(ql_dbg_async, vha, 0x5012,
            "Port database changed %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        /*
         * Mark all devices as missing so we will login again.
         */
        atomic_set(&vha->loop_state, LOOP_UP);

        qla2x00_mark_all_devices_lost(vha, 1);

        if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
            set_bit(SCR_PENDING, &vha->dpc_flags);

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        set_bit(VP_CONFIG_OK, &vha->vp_flags);

        qlt_async_event(mb[0], vha, mb);
        break;

    case MBA_RSCN_UPDATE:		/* State Change Registration */
        /* Check if the Vport has issued a SCR */
        if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
            break;
        /* Only handle SCNs for our Vport index. */
        if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
            break;

        ql_dbg(ql_dbg_async, vha, 0x5013,
            "RSCN database changed -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
        host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
            | vha->d_id.b.al_pa;
        if (rscn_entry == host_pid) {
            ql_dbg(ql_dbg_async, vha, 0x5014,
                "Ignoring RSCN update to local host "
                "port ID (%06x).\n", host_pid);
            break;
        }

        /* Ignore reserved bits from RSCN-payload. */
        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

        /* Skip RSCNs for virtual ports on the same physical port */
        if (qla2x00_is_a_vp_did(vha, rscn_entry))
            break;

        /*
         * Search for the rport related to this RSCN entry and mark it
         * as lost.
         */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (atomic_read(&fcport->state) != FCS_ONLINE)
                continue;
            if (fcport->d_id.b24 == rscn_entry) {
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                break;
            }
        }

        atomic_set(&vha->loop_down_timer, 0);
        vha->flags.management_server_logged_in = 0;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);
        qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
        break;

    /* case MBA_RIO_RESPONSE: */
    case MBA_ZIO_RESPONSE:
        ql_dbg(ql_dbg_async, vha, 0x5015,
            "[R|Z]IO update completion.\n");

        if (IS_FWI2_CAPABLE(ha))
            qla24xx_process_response_queue(vha, rsp);
        else
            qla2x00_process_response_queue(rsp);
        break;

    case MBA_DISCARD_RND_FRAME:
        ql_dbg(ql_dbg_async, vha, 0x5016,
            "Discard RND Frame -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_TRACE_NOTIFICATION:
        ql_dbg(ql_dbg_async, vha, 0x5017,
            "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
        break;

    case MBA_ISP84XX_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x5018,
            "ISP84XX Alert Notification -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
        switch (mb[1]) {
        case A84_PANIC_RECOVERY:
            ql_log(ql_log_info, vha, 0x5019,
                "Alert 84XX: panic recovery %04x %04x.\n",
                mb[2], mb[3]);
            break;
        case A84_OP_LOGIN_COMPLETE:
            ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501a,
                "Alert 84XX: firmware version %x.\n",
                ha->cs84xx->op_fw_version);
            break;
        case A84_DIAG_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501b,
                "Alert 84XX: diagnostic firmware version %x.\n",
                ha->cs84xx->diag_fw_version);
            break;
        case A84_GOLD_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ha->cs84xx->fw_update = 1;
            ql_log(ql_log_info, vha, 0x501c,
                "Alert 84XX: gold firmware version %x.\n",
                ha->cs84xx->gold_fw_version);
            break;
        default:
            ql_log(ql_log_warn, vha, 0x501d,
                "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
        }
        spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
        break;
    case MBA_DCBX_START:
        ql_dbg(ql_dbg_async, vha, 0x501e,
            "DCBX Started -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;
    case MBA_DCBX_PARAM_UPDATE:
        ql_dbg(ql_dbg_async, vha, 0x501f,
            "DCBX Parameters Updated -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;
    case MBA_FCF_CONF_ERR:
        ql_dbg(ql_dbg_async, vha, 0x5020,
            "FCF Configuration Error -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;
    case MBA_IDC_NOTIFY:
        if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
            mb[4] = RD_REG_WORD(&reg24->mailbox4);
            if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
                (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
                (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
                set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
                /*
                 * Extend loop down timer since port is active.
                 */
                if (atomic_read(&vha->loop_state) == LOOP_DOWN)
                    atomic_set(&vha->loop_down_timer,
                        LOOP_DOWN_TIME);
                qla2xxx_wake_dpc(vha);
            }
        }
    case MBA_IDC_COMPLETE:
        if (ha->notify_lb_portup_comp && !vha->vp_idx)
            complete(&ha->lb_portup_comp);
        /* Fallthru */
    case MBA_IDC_TIME_EXT:
        if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
            IS_QLA8044(ha))
            qla81xx_idc_event(vha, mb[0], mb[1]);
        break;

    case MBA_IDC_AEN:
        mb[4] = RD_REG_WORD(&reg24->mailbox4);
        mb[5] = RD_REG_WORD(&reg24->mailbox5);
        mb[6] = RD_REG_WORD(&reg24->mailbox6);
        mb[7] = RD_REG_WORD(&reg24->mailbox7);
        qla83xx_handle_8200_aen(vha, mb);
        break;

    case MBA_DPORT_DIAGNOSTICS:
        ql_dbg(ql_dbg_async, vha, 0x5052,
            "D-Port Diagnostics: %04x result=%s\n",
            mb[0],
            mb[1] == 0 ? "start" :
            mb[1] == 1 ? "done (pass)" :
            mb[1] == 2 ? "done (error)" : "other");
        break;

    case MBA_TEMPERATURE_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x505e,
            "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
        if (mb[1] == 0x12)
            schedule_work(&ha->board_disable);
        break;

    default:
        ql_dbg(ql_dbg_async, vha, 0x5057,
            "Unknown AEN:%04x %04x %04x %04x\n",
            mb[0], mb[1], mb[2], mb[3]);
    }

    qlt_async_event(mb[0], vha, mb);

    if (!vha->vp_idx && ha->num_vhosts)
        qla2x00_alert_all_vps(rsp, mb);
}
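/*
 * RIO handle packing, for reference: a 32-bit fast-post handle is
 * split across two 16-bit mailboxes, so (hypothetically) mb[1] =
 * 0x3456 with mb[2] = 0x0012 reassembles to handle 0x00123456 in the
 * MBA_SCSI_COMPLETION setup above.  The 16-bit RIO variants carry up
 * to five handles per interrupt, reading the overflow from mailboxes
 * 6 and 7.
 */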
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;

    /* Validate handle. */
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x3014,
            "Invalid SCSI command index (%x).\n", index);

        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;

        /* Save ISP completion status */
        sp->done(ha, sp, DID_OK << 16);
    } else {
        ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    }
}
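/*
 * Fast-post responses carry no status payload, which is why the
 * completion above is hard-wired to DID_OK.  An out-of-range index or
 * an already-empty slot means driver and firmware disagree about the
 * outstanding-command set, and the only safe recovery is a full ISP
 * (or FCoE context) reset.
 */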
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    sts_entry_t *pkt = iocb;
    srb_t *sp = NULL;
    uint16_t index;

    index = LSW(pkt->handle);
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x5031,
            "Invalid command index (%x).\n", index);
        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        goto done;
    }
    sp = req->outstanding_cmds[index];
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x5032,
            "Invalid completion handle (%x) -- timed-out.\n", index);
        return sp;
    }
    if (sp->handle != index) {
        ql_log(ql_log_warn, vha, 0x5033,
            "SRB handle (%x) mismatch %x.\n", sp->handle, index);
        return NULL;
    }

    req->outstanding_cmds[index] = NULL;

done:
    return sp;
}
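/*
 * The low word of the IOCB handle indexes outstanding_cmds[].  The
 * slot is cleared here, before any completion callback runs, so a
 * duplicate completion for the same handle is caught by the NULL
 * check above instead of completing one SRB twice.
 */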
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
    const char func[] = "MBX-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *lio;
    uint16_t *data;
    uint16_t status;

    sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
    if (!sp)
        return;

    lio = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    data = lio->u.logio.data;

    data[0] = MBS_COMMAND_ERROR;
    data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
        QLA_LOGIO_LOGIN_RETRIED : 0;
    if (mbx->entry_status) {
        ql_dbg(ql_dbg_async, vha, 0x5043,
            "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
            "entry-status=%x status=%x state-flag=%x "
            "status-flags=%x.\n", type, sp->handle,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mbx->entry_status,
            le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
            le16_to_cpu(mbx->status_flags));

        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
            (uint8_t *)mbx, sizeof(*mbx));

        goto logio_done;
    }

    status = le16_to_cpu(mbx->status);
    if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
        le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
        status = 0;
    if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
        ql_dbg(ql_dbg_async, vha, 0x5045,
            "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
            type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(mbx->mb1));

        data[0] = MBS_COMMAND_COMPLETE;
        if (sp->type == SRB_LOGIN_CMD) {
            fcport->port_type = FCT_TARGET;
            if (le16_to_cpu(mbx->mb1) & BIT_0)
                fcport->port_type = FCT_INITIATOR;
            else if (le16_to_cpu(mbx->mb1) & BIT_1)
                fcport->flags |= FCF_FCP2_DEVICE;
        }
        goto logio_done;
    }

    data[0] = le16_to_cpu(mbx->mb0);
    switch (data[0]) {
    case MBS_PORT_ID_USED:
        data[1] = le16_to_cpu(mbx->mb1);
        break;
    case MBS_LOOP_ID_USED:
        break;
    default:
        data[0] = MBS_COMMAND_ERROR;
        break;
    }

    ql_log(ql_log_warn, vha, 0x5046,
        "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
        "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
        status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
        le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
        le16_to_cpu(mbx->mb7));

logio_done:
    sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
    const char func[] = "CT_IOCB";
    const char *type;
    srb_t *sp;
    struct fc_bsg_job *bsg_job;
    uint16_t comp_status;
    int res;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    bsg_job = sp->u.bsg_job;

    type = "ct pass-through";

    comp_status = le16_to_cpu(pkt->comp_status);

    /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
     * fc payload to the caller
     */
    bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);

    if (comp_status != CS_COMPLETE) {
        if (comp_status == CS_DATA_UNDERRUN) {
            res = DID_OK << 16;
            bsg_job->reply->reply_payload_rcv_len =
                le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

            ql_log(ql_log_warn, vha, 0x5048,
                "CT pass-through-%s error "
                "comp_status-status=0x%x total_byte = 0x%x.\n",
                type, comp_status,
                bsg_job->reply->reply_payload_rcv_len);
        } else {
            ql_log(ql_log_warn, vha, 0x5049,
                "CT pass-through-%s error "
                "comp_status-status=0x%x.\n", type, comp_status);
            res = DID_ERROR << 16;
            bsg_job->reply->reply_payload_rcv_len = 0;
        }
        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
            (uint8_t *)pkt, sizeof(*pkt));
    } else {
        res = DID_OK << 16;
        bsg_job->reply->reply_payload_rcv_len =
            bsg_job->reply_payload.payload_len;
        bsg_job->reply_len = 0;
    }

    sp->done(vha, sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
    const char func[] = "ELS_CT_IOCB";
    const char *type;
    srb_t *sp;
    struct fc_bsg_job *bsg_job;
    uint16_t comp_status;
    uint32_t fw_status[3];
    uint8_t *fw_sts_ptr;
    int res;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;
    bsg_job = sp->u.bsg_job;

    type = NULL;
    switch (sp->type) {
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        type = "els";
        break;
    case SRB_CT_CMD:
        type = "ct pass-through";
        break;
    case SRB_ELS_DCMD:
        type = "Driver ELS logo";
        ql_dbg(ql_dbg_user, vha, 0x5047,
            "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
        sp->done(vha, sp, 0);
        return;
    default:
        ql_dbg(ql_dbg_user, vha, 0x503e,
            "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
        return;
    }

    comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
    fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
    fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

    /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
     * fc payload to the caller
     */
    bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

    if (comp_status != CS_COMPLETE) {
        if (comp_status == CS_DATA_UNDERRUN) {
            res = DID_OK << 16;
            bsg_job->reply->reply_payload_rcv_len =
                le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

            ql_dbg(ql_dbg_user, vha, 0x503f,
                "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
                type, sp->handle, comp_status, fw_status[1], fw_status[2],
                le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->total_byte_count));
            fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
            memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
        } else {
            ql_dbg(ql_dbg_user, vha, 0x5040,
                "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                "error subcode 1=0x%x error subcode 2=0x%x.\n",
                type, sp->handle, comp_status,
                le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->error_subcode_1),
                le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->error_subcode_2));
            res = DID_ERROR << 16;
            bsg_job->reply->reply_payload_rcv_len = 0;
            fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
            memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
        }
        ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
            (uint8_t *)pkt, sizeof(*pkt));
    } else {
        res = DID_OK << 16;
        bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
        bsg_job->reply_len = 0;
    }

    sp->done(vha, sp, res);
}
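/*
 * fw_status[] above packs the completion status plus the two ELS
 * error subcodes; it is copied in after the struct fc_bsg_reply in
 * the bsg sense buffer (and accounted for in reply_len) so user
 * space can decode ELS/CT failures from the reply itself.
 */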
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
    const char func[] = "LOGIO-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *lio;
    uint16_t *data;
    uint32_t iop[2];

    sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
    if (!sp)
        return;

    lio = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    data = lio->u.logio.data;

    data[0] = MBS_COMMAND_ERROR;
    data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
        QLA_LOGIO_LOGIN_RETRIED : 0;
    if (logio->entry_status) {
        ql_log(ql_log_warn, fcport->vha, 0x5034,
            "Async-%s error entry - hdl=%x"
            "portid=%02x%02x%02x entry-status=%x.\n",
            type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            logio->entry_status);
        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
            (uint8_t *)logio, sizeof(*logio));

        goto logio_done;
    }

    if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
        ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
            "Async-%s complete - hdl=%x portid=%02x%02x%02x "
            "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le32_to_cpu(logio->io_parameter[0]));

        data[0] = MBS_COMMAND_COMPLETE;
        if (sp->type != SRB_LOGIN_CMD)
            goto logio_done;

        iop[0] = le32_to_cpu(logio->io_parameter[0]);
        if (iop[0] & BIT_4) {
            fcport->port_type = FCT_TARGET;
            if (iop[0] & BIT_8)
                fcport->flags |= FCF_FCP2_DEVICE;
        } else if (iop[0] & BIT_5)
            fcport->port_type = FCT_INITIATOR;

        if (iop[0] & BIT_7)
            fcport->flags |= FCF_CONF_COMP_SUPPORTED;

        if (logio->io_parameter[7] || logio->io_parameter[8])
            fcport->supported_classes |= FC_COS_CLASS2;
        if (logio->io_parameter[9] || logio->io_parameter[10])
            fcport->supported_classes |= FC_COS_CLASS3;

        goto logio_done;
    }

    iop[0] = le32_to_cpu(logio->io_parameter[0]);
    iop[1] = le32_to_cpu(logio->io_parameter[1]);
    switch (iop[0]) {
    case LSC_SCODE_PORTID_USED:
        data[0] = MBS_PORT_ID_USED;
        data[1] = LSW(iop[1]);
        break;
    case LSC_SCODE_NPORT_USED:
        data[0] = MBS_LOOP_ID_USED;
        break;
    default:
        data[0] = MBS_COMMAND_ERROR;
        break;
    }

    ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
        "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
        "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa,
        le16_to_cpu(logio->comp_status),
        le32_to_cpu(logio->io_parameter[0]),
        le32_to_cpu(logio->io_parameter[1]));

logio_done:
    sp->done(vha, sp, 0);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
    const char func[] = "TMF-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *iocb;
    struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

    sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
    if (!sp)
        return;

    iocb = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    iocb->u.tmf.data = QLA_SUCCESS;

    if (sts->entry_status) {
        ql_log(ql_log_warn, fcport->vha, 0x5038,
            "Async-%s error - hdl=%x entry-status(%x).\n",
            type, sp->handle, sts->entry_status);
        iocb->u.tmf.data = QLA_FUNCTION_FAILED;
    } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
        ql_log(ql_log_warn, fcport->vha, 0x5039,
            "Async-%s error - hdl=%x completion status(%x).\n",
            type, sp->handle, sts->comp_status);
        iocb->u.tmf.data = QLA_FUNCTION_FAILED;
    } else if ((le16_to_cpu(sts->scsi_status) &
        SS_RESPONSE_INFO_LEN_VALID)) {
        if (le32_to_cpu(sts->rsp_data_len) < 4) {
            ql_log(ql_log_warn, fcport->vha, 0x503b,
                "Async-%s error - hdl=%x not enough response(%d).\n",
                type, sp->handle, sts->rsp_data_len);
        } else if (sts->data[3]) {
            ql_log(ql_log_warn, fcport->vha, 0x503c,
                "Async-%s error - hdl=%x response(%x).\n",
                type, sp->handle, sts->data[3]);
            iocb->u.tmf.data = QLA_FUNCTION_FAILED;
        }
    }

    if (iocb->u.tmf.data != QLA_SUCCESS)
        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
            (uint8_t *)sts, sizeof(*sts));

    sp->done(vha, sp, 0);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha = rsp->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    sts_entry_t *pkt;
    uint16_t handle_cnt;
    uint16_t cnt;

    vha = pci_get_drvdata(ha->pdev);

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (sts_entry_t *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            qla2x00_error_entry(vha, rsp, pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_TYPE_21:
            handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts21_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_TYPE_22:
            handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts22_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
            break;
        case MBX_IOCB_TYPE:
            qla2x00_mbx_iocb_entry(vha, rsp->req,
                (struct mbx_entry *)pkt);
            break;
        case CT_IOCB_TYPE:
            qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
            break;
        default:
            /* Type Not Supported. */
            ql_log(ql_log_warn, vha, 0x504a,
                "Received unknown response pkt type %x "
                "entry status=%x.\n",
                pkt->entry_type, pkt->entry_status);
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
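/*
 * Ring consumption above relies on the RESPONSE_PROCESSED signature:
 * each consumed entry is stamped (with a wmb() ordering the stamp) so
 * the next pass stops at the first entry the firmware has not
 * rewritten, without reading a hardware index per entry.  Only the
 * out-pointer write-back at the end touches the chip.
 */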
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct scsi_cmnd *cp = GET_CMD_SP(sp);
    uint32_t track_sense_len;

    if (sense_len >= SCSI_SENSE_BUFFERSIZE)
        sense_len = SCSI_SENSE_BUFFERSIZE;

    SET_CMD_SENSE_LEN(sp, sense_len);
    SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
    track_sense_len = sense_len;

    if (sense_len > par_sense_len)
        sense_len = par_sense_len;

    memcpy(cp->sense_buffer, sense_data, sense_len);

    SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
    track_sense_len -= sense_len;
    SET_CMD_SENSE_LEN(sp, track_sense_len);

    if (track_sense_len != 0) {
        rsp->status_srb = sp;
        cp->result = res;
    }

    if (sense_len) {
        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
            "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
            sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
            cp);
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
            cp->sense_buffer, sense_len);
    }
}
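/*
 * Sense data larger than one status IOCB arrives in follow-on status
 * continuation entries.  Parking the SRB in rsp->status_srb with the
 * remaining byte count (tracked via SET_CMD_SENSE_LEN/PTR) lets
 * qla2x00_status_cont_entry() append the rest before the command is
 * finally completed.
 */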
struct scsi_dif_tuple {
    __be16 guard;	/* Checksum */
    __be16 app_tag;	/* APPL identifier */
    __be32 ref_tag;	/* Target LBA or indirect LBA */
};
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    uint8_t *ap = &sts24->data[12];
    uint8_t *ep = &sts24->data[20];
    uint32_t e_ref_tag, a_ref_tag;
    uint16_t e_app_tag, a_app_tag;
    uint16_t e_guard, a_guard;

    /*
     * swab32 of the "data" field in the beginning of qla2x00_status_entry()
     * would make guard field appear at offset 2
     */
    a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
    a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
    a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
    e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
    e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
    e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

    ql_dbg(ql_dbg_io, vha, 0x3023,
        "iocb(s) %p Returned STATUS.\n", sts24);

    ql_dbg(ql_dbg_io, vha, 0x3024,
        "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
        " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
        " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
        cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
        a_app_tag, e_app_tag, a_guard, e_guard);

    /*
     * Ignore sector if:
     * For type     3: ref & app tag is all 'f's
     * For type 0,1,2: app tag is all 'f's
     */
    if ((a_app_tag == 0xffff) &&
        ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
        (a_ref_tag == 0xffffffff))) {
        uint32_t blocks_done, resid;
        sector_t lba_s = scsi_get_lba(cmd);

        /* 2TB boundary case covered automatically with this */
        blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

        resid = scsi_bufflen(cmd) - (blocks_done *
            cmd->device->sector_size);

        scsi_set_resid(cmd, resid);
        cmd->result = DID_OK << 16;

        /* Update protection tag */
        if (scsi_prot_sg_count(cmd)) {
            uint32_t i, j = 0, k = 0, num_ent;
            struct scatterlist *sg;
            struct sd_dif_tuple *spt;

            /* Patch the corresponding protection tags */
            scsi_for_each_prot_sg(cmd, sg,
                scsi_prot_sg_count(cmd), i) {
                num_ent = sg_dma_len(sg) / 8;
                if (k + num_ent < blocks_done) {
                    k += num_ent;
                    continue;
                }
                j = blocks_done - k - 1;
                k = blocks_done;
                break;
            }

            if (k != blocks_done) {
                ql_log(ql_log_warn, vha, 0x302f,
                    "unexpected tag values tag:lba=%x:%llx)\n",
                    e_ref_tag, (unsigned long long)lba_s);
                return 1;
            }

            spt = page_address(sg_page(sg)) + sg->offset;
            spt += j;

            spt->app_tag = 0xffff;
            if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
                spt->ref_tag = 0xffffffff;
        }

        return 0;
    }

    /* check guard */
    if (e_guard != a_guard) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x1);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
        return 1;
    }

    /* check ref tag */
    if (e_ref_tag != a_ref_tag) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x3);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
        return 1;
    }

    /* check appl tag */
    if (e_app_tag != a_app_tag) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x2);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
        return 1;
    }

    return 1;
}
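/*
 * Example of the "ignore sector" arithmetic above, with hypothetical
 * values: for a 16-block read at lba_s = 0x100 where the error tuple
 * reports e_ref_tag = 0x107, blocks_done = 0x107 - 0x100 + 1 = 8, so
 * resid = scsi_bufflen(cmd) - 8 * sector_size and the request is
 * completed DID_OK as a shortened transfer.
 */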
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
    struct qla_hw_data *ha = vha->hw;
    srb_t *sp;
    uint16_t comp_status;
    uint16_t scsi_status;
    uint16_t thread_id;
    uint32_t rval = EXT_STATUS_OK;
    struct fc_bsg_job *bsg_job = NULL;
    sts_entry_t *sts;
    struct sts_entry_24xx *sts24;
    sts = (sts_entry_t *) pkt;
    sts24 = (struct sts_entry_24xx *) pkt;

    /* Validate handle. */
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x70af,
            "Invalid SCSI completion handle 0x%x.\n", index);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;
        bsg_job = sp->u.bsg_job;
    } else {
        ql_log(ql_log_warn, vha, 0x70b0,
            "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
            req->id, index);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    if (IS_FWI2_CAPABLE(ha)) {
        comp_status = le16_to_cpu(sts24->comp_status);
        scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
    } else {
        comp_status = le16_to_cpu(sts->comp_status);
        scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
    }

    thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    switch (comp_status) {
    case CS_COMPLETE:
        if (scsi_status == 0) {
            bsg_job->reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;
            vha->qla_stats.input_bytes +=
                bsg_job->reply->reply_payload_rcv_len;
            vha->qla_stats.input_requests++;
            rval = EXT_STATUS_OK;
        }
        goto done;

    case CS_DATA_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b1,
            "Command completed with data overrun thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_DATA_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b2,
            "Command completed with data underrun thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;
    case CS_BIDIR_RD_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b3,
            "Command completed with read data overrun thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_BIDIR_RD_WR_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b4,
            "Command completed with read and write data overrun "
            "thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b5,
            "Command completed with read data over and write data "
            "underrun thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_OVERRUN;
        break;

    case CS_BIDIR_RD_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b6,
            "Command completed with read data underrun "
            "thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;

    case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b7,
            "Command completed with read data under and write data "
            "overrun thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;

    case CS_BIDIR_RD_WR_UNDERRUN:
        ql_dbg(ql_dbg_user, vha, 0x70b8,
            "Command completed with read and write data underrun "
            "thread_id=%d\n", thread_id);
        rval = EXT_STATUS_DATA_UNDERRUN;
        break;

    case CS_BIDIR_DMA_ERR:
        ql_dbg(ql_dbg_user, vha, 0x70b9,
            "Command completed with data DMA error thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_DMA_ERR;
        break;

    case CS_TIMEOUT:
        ql_dbg(ql_dbg_user, vha, 0x70ba,
            "Command completed with timeout thread_id=%d\n",
            thread_id);
        rval = EXT_STATUS_TIMEOUT;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x70bb,
            "Command completed with completion status=0x%x "
            "thread_id=%d\n", comp_status, thread_id);
        rval = EXT_STATUS_ERR;
        break;
    }
    bsg_job->reply->reply_payload_rcv_len = 0;

done:
    /* Return the vendor specific reply to API */
    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    /* Always return DID_OK, bsg will send the vendor specific response
     * in this case only */
    sp->done(vha, sp, (DID_OK << 16));
}
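/*
 * Note that every bidirectional completion, including the error cases
 * above, finishes with DID_OK: the real disposition travels in
 * vendor_rsp[0] as an EXT_STATUS_* code, and the bsg layer forwards
 * that vendor-specific reply to the API instead of a SCSI result.
 */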
2044 * qla2x00_status_entry() - Process a Status IOCB entry.
2045 * @ha: SCSI driver HA context
2046 * @pkt: Entry pointer
2049 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2053 struct scsi_cmnd *cp;
2055 struct sts_entry_24xx *sts24;
2056 uint16_t comp_status;
2057 uint16_t scsi_status;
2059 uint8_t lscsi_status;
2061 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2063 uint8_t *rsp_info, *sense_data;
2064 struct qla_hw_data *ha = vha->hw;
2067 struct req_que *req;
2070 uint16_t state_flags = 0;
2071 uint16_t retry_delay = 0;
2073 sts = (sts_entry_t *) pkt;
2074 sts24 = (struct sts_entry_24xx *) pkt;
2075 if (IS_FWI2_CAPABLE(ha)) {
2076 comp_status = le16_to_cpu(sts24->comp_status);
2077 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2078 state_flags = le16_to_cpu(sts24->state_flags);
2080 comp_status = le16_to_cpu(sts->comp_status);
2081 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2083 handle = (uint32_t) LSW(sts->handle);
2084 que = MSW(sts->handle);
2085 req = ha->req_q_map[que];
2087 /* Check for invalid queue pointer */
2089 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2090 ql_dbg(ql_dbg_io, vha, 0x3059,
2091 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2092 "que=%u.\n", sts->handle, req, que);
2096 /* Validate handle. */
2097 if (handle < req->num_outstanding_cmds) {
2098 sp = req->outstanding_cmds[handle];
2100 ql_dbg(ql_dbg_io, vha, 0x3075,
2101 "%s(%ld): Already returned command for status handle (0x%x).\n",
2102 __func__, vha->host_no, sts->handle);
2106 ql_dbg(ql_dbg_io, vha, 0x3017,
2107 "Invalid status handle, out of range (0x%x).\n",
2110 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2111 if (IS_P3P_TYPE(ha))
2112 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2114 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2115 qla2xxx_wake_dpc(vha);
2120 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2121 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2125 /* Task Management completion. */
2126 if (sp->type == SRB_TM_CMD) {
2127 qla24xx_tm_iocb_entry(vha, req, pkt);
2131 /* Fast path completion. */
2132 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2133 qla2x00_process_completed_request(vha, req, handle);
2138 req->outstanding_cmds[handle] = NULL;
2139 cp = GET_CMD_SP(sp);
2141 ql_dbg(ql_dbg_io, vha, 0x3018,
2142 "Command already returned (0x%x/%p).\n",
2148 lscsi_status = scsi_status & STATUS_MASK;
2150 fcport = sp->fcport;
2153 sense_len = par_sense_len = rsp_info_len = resid_len =
2155 if (IS_FWI2_CAPABLE(ha)) {
2156 if (scsi_status & SS_SENSE_LEN_VALID)
2157 sense_len = le32_to_cpu(sts24->sense_len);
2158 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2159 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2160 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2161 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2162 if (comp_status == CS_DATA_UNDERRUN)
2163 fw_resid_len = le32_to_cpu(sts24->residual_len);
2164 rsp_info = sts24->data;
2165 sense_data = sts24->data;
2166 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2167 ox_id = le16_to_cpu(sts24->ox_id);
2168 par_sense_len = sizeof(sts24->data);
2169 /* Valid values of the retry delay timer are 0x1-0xffef */
2170 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
2171 retry_delay = sts24->retry_delay;
2173 if (scsi_status & SS_SENSE_LEN_VALID)
2174 sense_len = le16_to_cpu(sts->req_sense_length);
2175 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2176 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2177 resid_len = le32_to_cpu(sts->residual_length);
2178 rsp_info = sts->rsp_info;
2179 sense_data = sts->req_sense_data;
2180 par_sense_len = sizeof(sts->req_sense_data);
	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check retry_delay_timer value if we receive a busy or
	 * task set full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
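
	/*
	 * The firmware-supplied retry delay is cached on the fcport so that
	 * subsequent commands can be held off while the target reports busy.
	 */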

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of TASK SET FULL or BUSY means the
			 * task did not complete; anything else here is a
			 * dropped frame.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port to be marked lost on fcport=%02x%02x%02x, current "
		    "port state= %s.\n", fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    port_state_str[atomic_read(&fcport->state)]);

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);
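
	/*
	 * When sense data spills into status continuation entries,
	 * rsp->status_srb stays set and final completion is deferred to
	 * qla2x00_status_cont_entry().
	 */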
	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;
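
	/*
	 * Record what remains so the next continuation entry appends after
	 * the bytes copied above.
	 */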
	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];
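
	/*
	 * A busy completion is surfaced as DID_BUS_BUSY so the midlayer
	 * retries; every other error entry completes with DID_ERROR.
	 */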
	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
	sp->done(vha, sp, 0);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
		/* if kernel does not notify qla of IRQ's CPU change,
		 * then set it here.
		 */
		rsp->msix->cpuid = smp_processor_id();
		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
	}
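
	/*
	 * Keeping the cached cpuid current lets the target-mode code steer
	 * work onto the CPU that actually services this queue's vector.
	 */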

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			if (qlt_24xx_process_response_error(vha, pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:
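		/*
		 * Errored packets claimed by the target code jump here so
		 * they still reach the normal dispatch switch below.
		 */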

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, (response_t *)pkt);
				break;
			} else {
				/* drop through */
				qlt_24xx_process_atio_queue(vha, 1);
			}
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}
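
	/*
	 * Let the firmware know how far the driver has consumed the
	 * response ring; the register write below moves the out-pointer.
	 */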
	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;
	uint32_t stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	/*
	 * Use host_status register to check for PCI disconnection before
	 * we process the response queue.
	 */
	stat = RD_REG_DWORD(&reg->host_status);
	if (qla2x00_check_reg32_for_disconnect(vha, stat))
		goto out;
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
out:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	scsi_qla_host_t *vha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;
	uint32_t hccr = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
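
	/*
	 * Hand the actual ring processing to a work item so it runs on the
	 * CPU bound to this response queue.
	 */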
	if (qla2x00_check_reg32_for_disconnect(vha, hccr))
		goto out;
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
out:
	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};
static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq) {
			/* un-register irq cpu affinity notification */
			irq_set_affinity_notifier(qentry->vector, NULL);
			free_irq(qentry->vector, qentry->rsp);
		}
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
#define ATIO_VECTOR	2
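	/*
	 * Vector 0 handles default events, vector 1 the base response
	 * queue, and vector 2 (ATIO_VECTOR) the ATIO queue when target
	 * mode is enabled.
	 */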
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix_range(ha->pdev,
	    entries, MIN_MSIX_COUNT, ha->msix_count);
	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d\n Retry with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ha->max_rsp_queues = ha->msix_count - 1;
	}
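
	/*
	 * One vector always backs the default queue, so only the remaining
	 * msix_count - 1 vectors can serve additional response queues.
	 */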
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
		qentry->irq_notify.notify = qla_irq_affinity_notify;
		qentry->irq_notify.release = qla_irq_affinity_release;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		qentry->rsp = rsp;
		rsp->msix = qentry;
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;

		/* Register for CPU affinity notification. */
		irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);

		/* Schedule work (ie. trigger a notification) to read cpu
		 * mask for this specific irq.
		 * kref_get is required because
		 * irq_affinity_notify() will do
		 * kref_put().
		 */
		kref_get(&qentry->irq_notify.kref);
		schedule_work(&qentry->irq_notify.work);
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[ATIO_VECTOR];
		qentry->rsp = rsp;
		rsp->msix = qentry;
		ret = request_irq(qentry->vector,
		    qla83xx_msix_entries[ATIO_VECTOR].handler,
		    0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla24xx_disable_msix(ha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	    ha->pdev->subsystem_device == 0x7041 ||
	    ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
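	/*
	 * FWI-2 capable ISPs have no RISC semaphore to clear by hand; they
	 * fall straight through to the exit path below.
	 */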
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;

	rsp = ha->rsp_q_map[0];
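
	/*
	 * Every interrupt variant was requested against the base response
	 * queue, so that is the cookie free_irq() needs back.
	 */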
	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}

int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}

/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
	const cpumask_t *mask)
{
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct qla_hw_data *ha;
	struct scsi_qla_host *base_vha;

	/* user is recommended to set mask to just 1 cpu */
	e->cpuid = cpumask_first(mask);

	ha = e->rsp->hw;
	base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host %ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);

	if (e->have_irq) {
		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
		    (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
			ha->tgt.rspq_vector_cpuid = e->cpuid;
			ql_dbg(ql_dbg_init, base_vha, 0xffff,
			    "%s: host%ld: rspq vector %d cpu %d runtime change\n",
			    __func__, base_vha->host_no, e->vector, e->cpuid);
		}
	}
}

static void qla_irq_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host%ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);
}