drivers/scsi/qla2xxx/qla_isr.c
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
46152ceb 3 * Copyright (c) 2003-2012 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
2d70c103 8#include "qla_target.h"
1da177e4 9
05236a05 10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
df7baa50 12#include <scsi/scsi_tcq.h>
9a069e19 13#include <scsi/scsi_bsg_fc.h>
bad75002 14#include <scsi/scsi_eh.h>
df7baa50 15
1da177e4 16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
73208dfd
AC
17static void qla2x00_process_completed_request(struct scsi_qla_host *,
18 struct req_que *, uint32_t);
19static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
2afa19a9 20static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
73208dfd
AC
21static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
22 sts_entry_t *);
9a853f71 23
1da177e4
LT
24/**
25 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
26 * @irq: interrupt number
27 * @dev_id: SCSI driver HA context
1da177e4
LT
28 *
29 * Called by system whenever the host adapter generates an interrupt.
30 *
31 * Returns handled flag.
32 */
33irqreturn_t
7d12e780 34qla2100_intr_handler(int irq, void *dev_id)
1da177e4 35{
e315cd28
AC
36 scsi_qla_host_t *vha;
37 struct qla_hw_data *ha;
3d71644c 38 struct device_reg_2xxx __iomem *reg;
1da177e4 39 int status;
1da177e4 40 unsigned long iter;
14e660e6 41 uint16_t hccr;
9a853f71 42 uint16_t mb[4];
e315cd28 43 struct rsp_que *rsp;
43fac4d9 44 unsigned long flags;
1da177e4 45
e315cd28
AC
46 rsp = (struct rsp_que *) dev_id;
47 if (!rsp) {
3256b435
CD
48 ql_log(ql_log_info, NULL, 0x505d,
49 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
50 return (IRQ_NONE);
51 }
52
e315cd28 53 ha = rsp->hw;
3d71644c 54 reg = &ha->iobase->isp;
1da177e4
LT
55 status = 0;
56
43fac4d9 57 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 58 vha = pci_get_drvdata(ha->pdev);
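	/*
	 * Service at most 50 interrupt events per invocation before
	 * releasing the hardware lock.
	 */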
1da177e4 59 for (iter = 50; iter--; ) {
14e660e6
SJ
60 hccr = RD_REG_WORD(&reg->hccr);
61 if (hccr & HCCR_RISC_PAUSE) {
62 if (pci_channel_offline(ha->pdev))
63 break;
64
65 /*
66 * Issue a "HARD" reset in order for the RISC interrupt
a06a0f8e 67 * bit to be cleared. Schedule a big hammer to get
14e660e6
SJ
68 * out of the RISC PAUSED state.
69 */
70 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
71 RD_REG_WORD(&reg->hccr);
72
e315cd28
AC
73 ha->isp_ops->fw_dump(vha, 1);
74 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
75 break;
76 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
77 break;
78
79 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
80 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
81 RD_REG_WORD(&reg->hccr);
82
83 /* Get mailbox data. */
9a853f71
AV
84 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
85 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 86 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 87 status |= MBX_INTERRUPT;
9a853f71
AV
88 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
89 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
90 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
91 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 92 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
93 } else {
94 /*EMPTY*/
7c3df132
SK
95 ql_dbg(ql_dbg_async, vha, 0x5025,
96 "Unrecognized interrupt type (%d).\n",
97 mb[0]);
1da177e4
LT
98 }
99 /* Release mailbox registers. */
100 WRT_REG_WORD(&reg->semaphore, 0);
101 RD_REG_WORD(&reg->semaphore);
102 } else {
73208dfd 103 qla2x00_process_response_queue(rsp);
1da177e4
LT
104
105 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
106 RD_REG_WORD(&reg->hccr);
107 }
108 }
43fac4d9 109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 110
1da177e4
LT
111 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
112 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 113 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 114 complete(&ha->mbx_intr_comp);
1da177e4
LT
115 }
116
1da177e4
LT
117 return (IRQ_HANDLED);
118}
119
120/**
121 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
122 * @irq: interrupt number
123 * @dev_id: SCSI driver HA context
1da177e4
LT
124 *
125 * Called by system whenever the host adapter generates an interrupt.
126 *
127 * Returns handled flag.
128 */
129irqreturn_t
7d12e780 130qla2300_intr_handler(int irq, void *dev_id)
1da177e4 131{
e315cd28 132 scsi_qla_host_t *vha;
3d71644c 133 struct device_reg_2xxx __iomem *reg;
1da177e4 134 int status;
1da177e4
LT
135 unsigned long iter;
136 uint32_t stat;
1da177e4 137 uint16_t hccr;
9a853f71 138 uint16_t mb[4];
e315cd28
AC
139 struct rsp_que *rsp;
140 struct qla_hw_data *ha;
43fac4d9 141 unsigned long flags;
1da177e4 142
e315cd28
AC
143 rsp = (struct rsp_que *) dev_id;
144 if (!rsp) {
3256b435
CD
145 ql_log(ql_log_info, NULL, 0x5058,
146 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
147 return (IRQ_NONE);
148 }
149
e315cd28 150 ha = rsp->hw;
3d71644c 151 reg = &ha->iobase->isp;
1da177e4
LT
152 status = 0;
153
43fac4d9 154 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 155 vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
156 for (iter = 50; iter--; ) {
157 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
158 if (stat & HSR_RISC_PAUSED) {
85880801 159 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
160 break;
161
1da177e4
LT
162 hccr = RD_REG_WORD(&reg->hccr);
163 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
7c3df132
SK
164 ql_log(ql_log_warn, vha, 0x5026,
165 "Parity error -- HCCR=%x, Dumping "
166 "firmware.\n", hccr);
1da177e4 167 else
7c3df132
SK
168 ql_log(ql_log_warn, vha, 0x5027,
169 "RISC paused -- HCCR=%x, Dumping "
170 "firmware.\n", hccr);
1da177e4
LT
171
172 /*
173 * Issue a "HARD" reset in order for the RISC
174 * interrupt bit to be cleared. Schedule a big
a06a0f8e 175 * hammer to get out of the RISC PAUSED state.
1da177e4
LT
176 */
177 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
178 RD_REG_WORD(&reg->hccr);
07f31805 179
e315cd28
AC
180 ha->isp_ops->fw_dump(vha, 1);
181 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
182 break;
183 } else if ((stat & HSR_RISC_INT) == 0)
184 break;
185
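		/*
		 * The low byte of host_status identifies the interrupt
		 * source: 0x1/0x2/0x10/0x11 = mailbox completion,
		 * 0x12 = asynchronous event, 0x13 = response queue entry,
		 * 0x15/0x16 = 16-bit fast-post completions.
		 */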
1da177e4 186 switch (stat & 0xff) {
1da177e4
LT
187 case 0x1:
188 case 0x2:
189 case 0x10:
190 case 0x11:
e315cd28 191 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
192 status |= MBX_INTERRUPT;
193
194 /* Release mailbox registers. */
195 WRT_REG_WORD(&reg->semaphore, 0);
196 break;
197 case 0x12:
9a853f71
AV
198 mb[0] = MSW(stat);
199 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
200 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
201 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 202 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
203 break;
204 case 0x13:
73208dfd 205 qla2x00_process_response_queue(rsp);
1da177e4
LT
206 break;
207 case 0x15:
9a853f71
AV
208 mb[0] = MBA_CMPLT_1_16BIT;
209 mb[1] = MSW(stat);
73208dfd 210 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
211 break;
212 case 0x16:
9a853f71
AV
213 mb[0] = MBA_SCSI_COMPLETION;
214 mb[1] = MSW(stat);
215 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 216 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
217 break;
218 default:
7c3df132
SK
219 ql_dbg(ql_dbg_async, vha, 0x5028,
220 "Unrecognized interrupt type (%d).\n", stat & 0xff);
1da177e4
LT
221 break;
222 }
223 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
224 RD_REG_WORD_RELAXED(&reg->hccr);
225 }
43fac4d9 226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 227
1da177e4
LT
228 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
229 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 230 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 231 complete(&ha->mbx_intr_comp);
1da177e4
LT
232 }
233
1da177e4
LT
234 return (IRQ_HANDLED);
235}
236
237/**
238 * qla2x00_mbx_completion() - Process mailbox command completions.
239 * @vha: SCSI driver HA context
240 * @mb0: Mailbox0 register
241 */
242static void
e315cd28 243qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
244{
245 uint16_t cnt;
4fa94f83 246 uint32_t mboxes;
1da177e4 247 uint16_t __iomem *wptr;
e315cd28 248 struct qla_hw_data *ha = vha->hw;
3d71644c 249 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 250
4fa94f83
AV
251 /* Read all mbox registers? */
252 mboxes = (1 << ha->mbx_count) - 1;
253 if (!ha->mcp)
254 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
255 else
256 mboxes = ha->mcp->in_mb;
257
1da177e4
LT
258 /* Load return mailbox registers. */
259 ha->flags.mbox_int = 1;
260 ha->mailbox_out[0] = mb0;
4fa94f83 261 mboxes >>= 1;
1da177e4
LT
262 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
263
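	/*
	 * On the ISP2200, mailbox registers 8 and above live in a separate
	 * register block, so the walk pointer is re-seeded at register 8.
	 */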
264 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 265 if (IS_QLA2200(ha) && cnt == 8)
1da177e4 266 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
4fa94f83 267 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
1da177e4 268 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
4fa94f83 269 else if (mboxes & BIT_0)
1da177e4 270 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 271
1da177e4 272 wptr++;
4fa94f83 273 mboxes >>= 1;
1da177e4 274 }
1da177e4
LT
275}
276
8a659571
AV
277static void
278qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
279{
280 static char *event[] =
281 { "Complete", "Request Notification", "Time Extension" };
282 int rval;
283 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
284 uint16_t __iomem *wptr;
285 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
286
287 /* Seed data -- mailbox1 -> mailbox7. */
288 wptr = (uint16_t __iomem *)&reg24->mailbox1;
289 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
290 mb[cnt] = RD_REG_WORD(wptr);
291
7c3df132 292 ql_dbg(ql_dbg_async, vha, 0x5021,
6246b8a1 293 "Inter-Driver Communication %s -- "
7c3df132
SK
294 "%04x %04x %04x %04x %04x %04x %04x.\n",
295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
296 mb[4], mb[5], mb[6]);
bf5b8ad7
CD
297 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
298 vha->hw->flags.idc_compl_status = 1;
299 if (vha->hw->notify_dcbx_comp)
300 complete(&vha->hw->dcbx_comp);
301 }
8a659571 302
bf5b8ad7
CD
303 /* Acknowledgement needed? [Notify && non-zero timeout]. */
304 timeout = (descr >> 8) & 0xf;
305 if (aen != MBA_IDC_NOTIFY || !timeout)
306 return;
8fcd6b8b 307
bf5b8ad7
CD
308 ql_dbg(ql_dbg_async, vha, 0x5022,
309 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
310 vha->host_no, event[aen & 0xff], timeout);
8a659571
AV
311
312 rval = qla2x00_post_idc_ack_work(vha, mb);
313 if (rval != QLA_SUCCESS)
7c3df132 314 ql_log(ql_log_warn, vha, 0x5023,
8a659571
AV
315 "IDC failed to post ACK.\n");
316}
317
daae62a3
CD
318#define LS_UNKNOWN 2
319char *
320qla2x00_get_link_speed_str(struct qla_hw_data *ha)
321{
322 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
323 char *link_speed;
324 int fw_speed = ha->link_data_rate;
325
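	/* Firmware code 0x13 means 10 Gbps; codes below 6 index the table. */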
326 if (IS_QLA2100(ha) || IS_QLA2200(ha))
327 link_speed = link_speeds[0];
328 else if (fw_speed == 0x13)
329 link_speed = link_speeds[6];
330 else {
331 link_speed = link_speeds[LS_UNKNOWN];
332 if (fw_speed < 6)
333 link_speed =
334 link_speeds[fw_speed];
335 }
336
337 return link_speed;
338}
339
7d613ac6
SV
340void
341qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
342{
343 struct qla_hw_data *ha = vha->hw;
344
345 /*
346 * 8200 AEN Interpretation:
347 * mb[0] = AEN code
348 * mb[1] = AEN Reason code
349 * mb[2] = LSW of Peg-Halt Status-1 Register
350 * mb[6] = MSW of Peg-Halt Status-1 Register
351 * mb[3] = LSW of Peg-Halt Status-2 register
352 * mb[7] = MSW of Peg-Halt Status-2 register
353 * mb[4] = IDC Device-State Register value
354 * mb[5] = IDC Driver-Presence Register value
355 */
356 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
357 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
358 mb[0], mb[1], mb[2], mb[6]);
359 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
360 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
361 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
362
363 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
364 IDC_HEARTBEAT_FAILURE)) {
365 ha->flags.nic_core_hung = 1;
366 ql_log(ql_log_warn, vha, 0x5060,
367 "83XX: F/W Error Reported: Check if reset required.\n");
368
369 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
370 uint32_t protocol_engine_id, fw_err_code, err_level;
371
372 /*
373 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
374 * - PEG-Halt Status-1 Register:
375 * (LSW = mb[2], MSW = mb[6])
376 * Bits 0-7 = protocol-engine ID
377 * Bits 8-28 = f/w error code
378 * Bits 29-31 = Error-level
379 * Error-level 0x1 = Non-Fatal error
380 * Error-level 0x2 = Recoverable Fatal error
381 * Error-level 0x4 = UnRecoverable Fatal error
382 * - PEG-Halt Status-2 Register:
383 * (LSW = mb[3], MSW = mb[7])
384 */
385 protocol_engine_id = (mb[2] & 0xff);
386 fw_err_code = (((mb[2] & 0xff00) >> 8) |
387 ((mb[6] & 0x1fff) << 8));
388 err_level = ((mb[6] & 0xe000) >> 13);
389 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
390 "Register: protocol_engine_id=0x%x "
391 "fw_err_code=0x%x err_level=0x%x.\n",
392 protocol_engine_id, fw_err_code, err_level);
393 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
394 "Register: 0x%x%x.\n", mb[7], mb[3]);
395 if (err_level == ERR_LEVEL_NON_FATAL) {
396 ql_log(ql_log_warn, vha, 0x5063,
397 "Not a fatal error, f/w has recovered "
398 "iteself.\n");
399 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
400 ql_log(ql_log_fatal, vha, 0x5064,
401 "Recoverable Fatal error: Chip reset "
402 "required.\n");
403 qla83xx_schedule_work(vha,
404 QLA83XX_NIC_CORE_RESET);
405 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
406 ql_log(ql_log_fatal, vha, 0x5065,
407 "Unrecoverable Fatal error: Set FAILED "
408 "state, reboot required.\n");
409 qla83xx_schedule_work(vha,
410 QLA83XX_NIC_CORE_UNRECOVERABLE);
411 }
412 }
413
414 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
415 uint16_t peg_fw_state, nw_interface_link_up;
416 uint16_t nw_interface_signal_detect, sfp_status;
417 uint16_t htbt_counter, htbt_monitor_enable;
418 uint16_t sfp_additonal_info, sfp_multirate;
419 uint16_t sfp_tx_fault, link_speed, dcbx_status;
420
421 /*
422 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
423 * - PEG-to-FC Status Register:
424 * (LSW = mb[2], MSW = mb[6])
425 * Bits 0-7 = Peg-Firmware state
426 * Bit 8 = N/W Interface Link-up
427 * Bit 9 = N/W Interface signal detected
428 * Bits 10-11 = SFP Status
429 * SFP Status 0x0 = SFP+ transceiver not expected
430 * SFP Status 0x1 = SFP+ transceiver not present
431 * SFP Status 0x2 = SFP+ transceiver invalid
432 * SFP Status 0x3 = SFP+ transceiver present and
433 * valid
434 * Bits 12-14 = Heartbeat Counter
435 * Bit 15 = Heartbeat Monitor Enable
436 * Bits 16-17 = SFP Additional Info
437 * SFP info 0x0 = Unrecognized transceiver for
438 * Ethernet
439 * SFP info 0x1 = SFP+ brand validation failed
440 * SFP info 0x2 = SFP+ speed validation failed
441 * SFP info 0x3 = SFP+ access error
442 * Bit 18 = SFP Multirate
443 * Bit 19 = SFP Tx Fault
444 * Bits 20-22 = Link Speed
445 * Bits 23-27 = Reserved
446 * Bits 28-30 = DCBX Status
447 * DCBX Status 0x0 = DCBX Disabled
448 * DCBX Status 0x1 = DCBX Enabled
449 * DCBX Status 0x2 = DCBX Exchange error
450 * Bit 31 = Reserved
451 */
452 peg_fw_state = (mb[2] & 0x00ff);
453 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
454 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
455 sfp_status = ((mb[2] & 0x0c00) >> 10);
456 htbt_counter = ((mb[2] & 0x7000) >> 12);
457 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
458 sfp_additonal_info = (mb[6] & 0x0003);
459 sfp_multirate = ((mb[6] & 0x0004) >> 2);
460 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
461 link_speed = ((mb[6] & 0x0070) >> 4);
462 dcbx_status = ((mb[6] & 0x7000) >> 12);
463
464 ql_log(ql_log_warn, vha, 0x5066,
465 "Peg-to-Fc Status Register:\n"
466 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
467 "nw_interface_signal_detect=0x%x"
468 "\nsfp_statis=0x%x.\n ", peg_fw_state,
469 nw_interface_link_up, nw_interface_signal_detect,
470 sfp_status);
471 ql_log(ql_log_warn, vha, 0x5067,
472 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
473 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
474 htbt_counter, htbt_monitor_enable,
475 sfp_additonal_info, sfp_multirate);
476 ql_log(ql_log_warn, vha, 0x5068,
477 "sfp_tx_fault=0x%x, link_state=0x%x, "
478 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
479 dcbx_status);
480
481 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
482 }
483
484 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
485 ql_log(ql_log_warn, vha, 0x5069,
486 "Heartbeat Failure encountered, chip reset "
487 "required.\n");
488
489 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
490 }
491 }
492
493 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
494 ql_log(ql_log_info, vha, 0x506a,
495 "IDC Device-State changed = 0x%x.\n", mb[4]);
496 qla83xx_schedule_work(vha, MBA_IDC_AEN);
497 }
498}
499
1da177e4
LT
500/**
501 * qla2x00_async_event() - Process asynchronous events.
502 * @vha: SCSI driver HA context
9a853f71 503 * @mb: Mailbox registers (0 - 3)
1da177e4 504 */
2c3dfe3f 505void
73208dfd 506qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 507{
1da177e4 508 uint16_t handle_cnt;
bdab23da 509 uint16_t cnt, mbx;
1da177e4 510 uint32_t handles[5];
e315cd28 511 struct qla_hw_data *ha = vha->hw;
3d71644c 512 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
bdab23da 513 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
bc5c2aad 514 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1da177e4 515 uint32_t rscn_entry, host_pid;
4d4df193 516 unsigned long flags;
1da177e4
LT
517
518 /* Setup to process RIO completion. */
519 handle_cnt = 0;
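	/* CNA (FCoE) adapters do not use RIO fast-post completions. */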
6246b8a1 520 if (IS_CNA_CAPABLE(ha))
3a03eb79 521 goto skip_rio;
1da177e4
LT
522 switch (mb[0]) {
523 case MBA_SCSI_COMPLETION:
9a853f71 524 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
525 handle_cnt = 1;
526 break;
527 case MBA_CMPLT_1_16BIT:
9a853f71 528 handles[0] = mb[1];
1da177e4
LT
529 handle_cnt = 1;
530 mb[0] = MBA_SCSI_COMPLETION;
531 break;
532 case MBA_CMPLT_2_16BIT:
9a853f71
AV
533 handles[0] = mb[1];
534 handles[1] = mb[2];
1da177e4
LT
535 handle_cnt = 2;
536 mb[0] = MBA_SCSI_COMPLETION;
537 break;
538 case MBA_CMPLT_3_16BIT:
9a853f71
AV
539 handles[0] = mb[1];
540 handles[1] = mb[2];
541 handles[2] = mb[3];
1da177e4
LT
542 handle_cnt = 3;
543 mb[0] = MBA_SCSI_COMPLETION;
544 break;
545 case MBA_CMPLT_4_16BIT:
9a853f71
AV
546 handles[0] = mb[1];
547 handles[1] = mb[2];
548 handles[2] = mb[3];
1da177e4
LT
549 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
550 handle_cnt = 4;
551 mb[0] = MBA_SCSI_COMPLETION;
552 break;
553 case MBA_CMPLT_5_16BIT:
9a853f71
AV
554 handles[0] = mb[1];
555 handles[1] = mb[2];
556 handles[2] = mb[3];
1da177e4
LT
557 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
558 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
559 handle_cnt = 5;
560 mb[0] = MBA_SCSI_COMPLETION;
561 break;
562 case MBA_CMPLT_2_32BIT:
9a853f71 563 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
564 handles[1] = le32_to_cpu(
565 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
566 RD_MAILBOX_REG(ha, reg, 6));
567 handle_cnt = 2;
568 mb[0] = MBA_SCSI_COMPLETION;
569 break;
570 default:
571 break;
572 }
3a03eb79 573skip_rio:
1da177e4
LT
574 switch (mb[0]) {
575 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 576 if (!vha->flags.online)
1da177e4
LT
577 break;
578
579 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
580 qla2x00_process_completed_request(vha, rsp->req,
581 handles[cnt]);
1da177e4
LT
582 break;
583
584 case MBA_RESET: /* Reset */
7c3df132
SK
585 ql_dbg(ql_dbg_async, vha, 0x5002,
586 "Asynchronous RESET.\n");
1da177e4 587
e315cd28 588 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
589 break;
590
591 case MBA_SYSTEM_ERR: /* System Error */
6246b8a1
GM
592 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
593 RD_REG_WORD(&reg24->mailbox7) : 0;
7c3df132 594 ql_log(ql_log_warn, vha, 0x5003,
bdab23da
AV
595 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
596 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
1da177e4 597
e315cd28 598 ha->isp_ops->fw_dump(vha, 1);
1da177e4 599
e428924c 600 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 601 if (mb[1] == 0 && mb[2] == 0) {
7c3df132 602 ql_log(ql_log_fatal, vha, 0x5004,
9a853f71
AV
603 "Unrecoverable Hardware Error: adapter "
604 "marked OFFLINE!\n");
e315cd28 605 vha->flags.online = 0;
6246b8a1 606 vha->device_flags |= DFLG_DEV_FAILED;
b1d46989 607 } else {
25985edc 608 /* Check to see if MPI timeout occurred */
b1d46989
MI
609 if ((mbx & MBX_3) && (ha->flags.port0))
610 set_bit(MPI_RESET_NEEDED,
611 &vha->dpc_flags);
612
e315cd28 613 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
b1d46989 614 }
9a853f71 615 } else if (mb[1] == 0) {
7c3df132 616 ql_log(ql_log_fatal, vha, 0x5005,
1da177e4
LT
617 "Unrecoverable Hardware Error: adapter marked "
618 "OFFLINE!\n");
e315cd28 619 vha->flags.online = 0;
6246b8a1 620 vha->device_flags |= DFLG_DEV_FAILED;
1da177e4 621 } else
e315cd28 622 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
623 break;
624
625 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7c3df132
SK
626 ql_log(ql_log_warn, vha, 0x5006,
627 "ISP Request Transfer Error (%x).\n", mb[1]);
1da177e4 628
e315cd28 629 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
630 break;
631
632 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7c3df132
SK
633 ql_log(ql_log_warn, vha, 0x5007,
634 "ISP Response Transfer Error.\n");
1da177e4 635
e315cd28 636 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
637 break;
638
639 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
7c3df132
SK
640 ql_dbg(ql_dbg_async, vha, 0x5008,
641 "Asynchronous WAKEUP_THRES.\n");
1da177e4 642
2d70c103 643 break;
1da177e4 644 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
cfb0919c 645 ql_dbg(ql_dbg_async, vha, 0x5009,
7c3df132 646 "LIP occurred (%x).\n", mb[1]);
1da177e4 647
e315cd28
AC
648 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
649 atomic_set(&vha->loop_state, LOOP_DOWN);
650 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
651 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
652 }
653
e315cd28
AC
654 if (vha->vp_idx) {
655 atomic_set(&vha->vp_state, VP_FAILED);
656 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
657 }
658
e315cd28
AC
659 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
660 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 661
e315cd28
AC
662 vha->flags.management_server_logged_in = 0;
663 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
664 break;
665
666 case MBA_LOOP_UP: /* Loop Up Event */
daae62a3 667 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d8b45213 668 ha->link_data_rate = PORT_SPEED_1GB;
daae62a3 669 else
1da177e4 670 ha->link_data_rate = mb[1];
1da177e4 671
cfb0919c 672 ql_dbg(ql_dbg_async, vha, 0x500a,
daae62a3
CD
673 "LOOP UP detected (%s Gbps).\n",
674 qla2x00_get_link_speed_str(ha));
1da177e4 675
e315cd28
AC
676 vha->flags.management_server_logged_in = 0;
677 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
678 break;
679
680 case MBA_LOOP_DOWN: /* Loop Down Event */
6246b8a1
GM
681 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
682 ? RD_REG_WORD(&reg24->mailbox4) : 0;
bc5c2aad 683 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
cfb0919c 684 ql_dbg(ql_dbg_async, vha, 0x500b,
7c3df132
SK
685 "LOOP DOWN detected (%x %x %x %x).\n",
686 mb[1], mb[2], mb[3], mbx);
1da177e4 687
e315cd28
AC
688 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
689 atomic_set(&vha->loop_state, LOOP_DOWN);
690 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
691 vha->device_flags |= DFLG_NO_CABLE;
692 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
693 }
694
e315cd28
AC
695 if (vha->vp_idx) {
696 atomic_set(&vha->vp_state, VP_FAILED);
697 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
698 }
699
e315cd28 700 vha->flags.management_server_logged_in = 0;
d8b45213 701 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 702 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
703 break;
704
705 case MBA_LIP_RESET: /* LIP reset occurred */
cfb0919c 706 ql_dbg(ql_dbg_async, vha, 0x500c,
cc3ef7bc 707 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 708
e315cd28
AC
709 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
710 atomic_set(&vha->loop_state, LOOP_DOWN);
711 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
712 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
713 }
714
e315cd28
AC
715 if (vha->vp_idx) {
716 atomic_set(&vha->vp_state, VP_FAILED);
717 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
718 }
719
e315cd28 720 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
721
722 ha->operating_mode = LOOP;
e315cd28
AC
723 vha->flags.management_server_logged_in = 0;
724 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
725 break;
726
3a03eb79 727 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
728 case MBA_POINT_TO_POINT: /* Point-to-Point */
729 if (IS_QLA2100(ha))
730 break;
731
6246b8a1 732 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
7c3df132
SK
733 ql_dbg(ql_dbg_async, vha, 0x500d,
734 "DCBX Completed -- %04x %04x %04x.\n",
735 mb[1], mb[2], mb[3]);
23f2ebd1
SR
736 if (ha->notify_dcbx_comp)
737 complete(&ha->dcbx_comp);
738
739 } else
7c3df132
SK
740 ql_dbg(ql_dbg_async, vha, 0x500e,
741 "Asynchronous P2P MODE received.\n");
1da177e4
LT
742
743 /*
744 * Until there's a transition from loop down to loop up, treat
745 * this as loop down only.
746 */
e315cd28
AC
747 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
748 atomic_set(&vha->loop_state, LOOP_DOWN);
749 if (!atomic_read(&vha->loop_down_timer))
750 atomic_set(&vha->loop_down_timer,
1da177e4 751 LOOP_DOWN_TIME);
e315cd28 752 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
753 }
754
e315cd28
AC
755 if (vha->vp_idx) {
756 atomic_set(&vha->vp_state, VP_FAILED);
757 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
758 }
759
e315cd28
AC
760 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
761 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
762
763 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
764 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
765
766 ha->flags.gpsc_supported = 1;
e315cd28 767 vha->flags.management_server_logged_in = 0;
1da177e4
LT
768 break;
769
770 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
771 if (IS_QLA2100(ha))
772 break;
773
cfb0919c 774 ql_dbg(ql_dbg_async, vha, 0x500f,
1da177e4
LT
775 "Configuration change detected: value=%x.\n", mb[1]);
776
e315cd28
AC
777 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
778 atomic_set(&vha->loop_state, LOOP_DOWN);
779 if (!atomic_read(&vha->loop_down_timer))
780 atomic_set(&vha->loop_down_timer,
1da177e4 781 LOOP_DOWN_TIME);
e315cd28 782 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
783 }
784
e315cd28
AC
785 if (vha->vp_idx) {
786 atomic_set(&vha->vp_state, VP_FAILED);
787 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
788 }
789
e315cd28
AC
790 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
791 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
792 break;
793
794 case MBA_PORT_UPDATE: /* Port database update */
55903b9d
SV
795 /*
796 * Handle only global and vn-port update events
797 *
798 * Relevant inputs:
799 * mb[1] = N_Port handle of changed port
800 * OR 0xffff for global event
801 * mb[2] = New login state
802 * 7 = Port logged out
803 * mb[3] = LSB is vp_idx, 0xff = all vps
804 *
805 * Skip processing if:
806 * Event is global, vp_idx is NOT all vps,
807 * vp_idx does not match
808 * Event is not global, vp_idx does not match
809 */
12cec63e
AV
810 if (IS_QLA2XXX_MIDTYPE(ha) &&
811 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
812 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
813 break;
73208dfd 814
9764ff88
AV
815 /* Global event -- port logout or port unavailable. */
816 if (mb[1] == 0xffff && mb[2] == 0x7) {
7c3df132
SK
817 ql_dbg(ql_dbg_async, vha, 0x5010,
818 "Port unavailable %04x %04x %04x.\n",
819 mb[1], mb[2], mb[3]);
daae62a3
CD
820 ql_log(ql_log_warn, vha, 0x505e,
821 "Link is offline.\n");
9764ff88
AV
822
823 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
824 atomic_set(&vha->loop_state, LOOP_DOWN);
825 atomic_set(&vha->loop_down_timer,
826 LOOP_DOWN_TIME);
827 vha->device_flags |= DFLG_NO_CABLE;
828 qla2x00_mark_all_devices_lost(vha, 1);
829 }
830
831 if (vha->vp_idx) {
832 atomic_set(&vha->vp_state, VP_FAILED);
833 fc_vport_set_state(vha->fc_vport,
834 FC_VPORT_FAILED);
faadc5e7 835 qla2x00_mark_all_devices_lost(vha, 1);
9764ff88
AV
836 }
837
838 vha->flags.management_server_logged_in = 0;
839 ha->link_data_rate = PORT_SPEED_UNKNOWN;
840 break;
841 }
842
1da177e4 843 /*
cc3ef7bc 844 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1da177e4
LT
845 * event etc. earlier indicating loop is down) then process
846 * it. Otherwise ignore it and wait for RSCN to come in.
847 */
e315cd28 848 atomic_set(&vha->loop_down_timer, 0);
79cc785f 849 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
7c3df132
SK
850 ql_dbg(ql_dbg_async, vha, 0x5011,
851 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
852 mb[1], mb[2], mb[3]);
2d70c103
NB
853
854 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
855 break;
856 }
857
7c3df132
SK
858 ql_dbg(ql_dbg_async, vha, 0x5012,
859 "Port database changed %04x %04x %04x.\n",
860 mb[1], mb[2], mb[3]);
daae62a3
CD
861 ql_log(ql_log_warn, vha, 0x505f,
862 "Link is operational (%s Gbps).\n",
863 qla2x00_get_link_speed_str(ha));
1da177e4
LT
864
865 /*
866 * Mark all devices as missing so we will login again.
867 */
e315cd28 868 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 869
e315cd28 870 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 871
2d70c103
NB
872 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
873 set_bit(SCR_PENDING, &vha->dpc_flags);
874
e315cd28
AC
875 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
876 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2d70c103
NB
877
878 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
879 break;
880
881 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 882 /* Check if the Vport has issued a SCR */
e315cd28 883 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
884 break;
885 /* Only handle SCNs for our Vport index. */
0d6e61bc 886 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
3c397400 887 break;
0d6e61bc 888
7c3df132
SK
889 ql_dbg(ql_dbg_async, vha, 0x5013,
890 "RSCN database changed -- %04x %04x %04x.\n",
891 mb[1], mb[2], mb[3]);
1da177e4 892
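		/*
		 * Rebuild the affected 24-bit port ID (domain from mb[1],
		 * area/AL_PA from mb[2]) and ignore RSCNs that refer to
		 * this host's own port ID.
		 */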
59d72d87 893 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
894 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
895 | vha->d_id.b.al_pa;
1da177e4 896 if (rscn_entry == host_pid) {
7c3df132
SK
897 ql_dbg(ql_dbg_async, vha, 0x5014,
898 "Ignoring RSCN update to local host "
899 "port ID (%06x).\n", host_pid);
1da177e4
LT
900 break;
901 }
902
59d72d87
RA
903 /* Ignore reserved bits from RSCN-payload. */
904 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1da177e4 905
e315cd28
AC
906 atomic_set(&vha->loop_down_timer, 0);
907 vha->flags.management_server_logged_in = 0;
1da177e4 908
e315cd28
AC
909 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
910 set_bit(RSCN_UPDATE, &vha->dpc_flags);
911 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
912 break;
913
914 /* case MBA_RIO_RESPONSE: */
915 case MBA_ZIO_RESPONSE:
7c3df132
SK
916 ql_dbg(ql_dbg_async, vha, 0x5015,
917 "[R|Z]IO update completion.\n");
1da177e4 918
e428924c 919 if (IS_FWI2_CAPABLE(ha))
2afa19a9 920 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 921 else
73208dfd 922 qla2x00_process_response_queue(rsp);
1da177e4 923 break;
9a853f71
AV
924
925 case MBA_DISCARD_RND_FRAME:
7c3df132
SK
926 ql_dbg(ql_dbg_async, vha, 0x5016,
927 "Discard RND Frame -- %04x %04x %04x.\n",
928 mb[1], mb[2], mb[3]);
9a853f71 929 break;
45ebeb56
AV
930
931 case MBA_TRACE_NOTIFICATION:
7c3df132
SK
932 ql_dbg(ql_dbg_async, vha, 0x5017,
933 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
45ebeb56 934 break;
4d4df193
HK
935
936 case MBA_ISP84XX_ALERT:
7c3df132
SK
937 ql_dbg(ql_dbg_async, vha, 0x5018,
938 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
939 mb[1], mb[2], mb[3]);
4d4df193
HK
940
941 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
942 switch (mb[1]) {
943 case A84_PANIC_RECOVERY:
7c3df132
SK
944 ql_log(ql_log_info, vha, 0x5019,
945 "Alert 84XX: panic recovery %04x %04x.\n",
946 mb[2], mb[3]);
4d4df193
HK
947 break;
948 case A84_OP_LOGIN_COMPLETE:
949 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
950 ql_log(ql_log_info, vha, 0x501a,
951 "Alert 84XX: firmware version %x.\n",
952 ha->cs84xx->op_fw_version);
4d4df193
HK
953 break;
954 case A84_DIAG_LOGIN_COMPLETE:
955 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
956 ql_log(ql_log_info, vha, 0x501b,
957 "Alert 84XX: diagnostic firmware version %x.\n",
958 ha->cs84xx->diag_fw_version);
4d4df193
HK
959 break;
960 case A84_GOLD_LOGIN_COMPLETE:
961 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
962 ha->cs84xx->fw_update = 1;
7c3df132
SK
963 ql_log(ql_log_info, vha, 0x501c,
964 "Alert 84XX: gold firmware version %x.\n",
965 ha->cs84xx->gold_fw_version);
4d4df193
HK
966 break;
967 default:
7c3df132
SK
968 ql_log(ql_log_warn, vha, 0x501d,
969 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
4d4df193
HK
970 mb[1], mb[2], mb[3]);
971 }
972 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
973 break;
3a03eb79 974 case MBA_DCBX_START:
7c3df132
SK
975 ql_dbg(ql_dbg_async, vha, 0x501e,
976 "DCBX Started -- %04x %04x %04x.\n",
977 mb[1], mb[2], mb[3]);
3a03eb79
AV
978 break;
979 case MBA_DCBX_PARAM_UPDATE:
7c3df132
SK
980 ql_dbg(ql_dbg_async, vha, 0x501f,
981 "DCBX Parameters Updated -- %04x %04x %04x.\n",
982 mb[1], mb[2], mb[3]);
3a03eb79
AV
983 break;
984 case MBA_FCF_CONF_ERR:
7c3df132
SK
985 ql_dbg(ql_dbg_async, vha, 0x5020,
986 "FCF Configuration Error -- %04x %04x %04x.\n",
987 mb[1], mb[2], mb[3]);
3a03eb79 988 break;
3a03eb79 989 case MBA_IDC_NOTIFY:
8fcd6b8b
CD
990 /* See if we need to quiesce any I/O */
991 if (IS_QLA8031(vha->hw))
992 if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
993 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
994 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
8fcd6b8b
CD
995 qla2xxx_wake_dpc(vha);
996 }
997 case MBA_IDC_COMPLETE:
3a03eb79 998 case MBA_IDC_TIME_EXT:
bf5b8ad7 999 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
7d613ac6
SV
1000 qla81xx_idc_event(vha, mb[0], mb[1]);
1001 break;
1002
1003 case MBA_IDC_AEN:
1004 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1005 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1006 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1007 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1008 qla83xx_handle_8200_aen(vha, mb);
3a03eb79 1009 break;
7d613ac6 1010
6246b8a1
GM
1011 default:
1012 ql_dbg(ql_dbg_async, vha, 0x5057,
1013 "Unknown AEN:%04x %04x %04x %04x\n",
1014 mb[0], mb[1], mb[2], mb[3]);
1da177e4 1015 }
2c3dfe3f 1016
2d70c103
NB
1017 qlt_async_event(mb[0], vha, mb);
1018
e315cd28 1019 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 1020 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
1021}
1022
1023/**
1024 * qla2x00_process_completed_request() - Process a Fast Post response.
1025 * @vha: SCSI driver HA context
1026 * @index: SRB index
1027 */
1028static void
73208dfd
AC
1029qla2x00_process_completed_request(struct scsi_qla_host *vha,
1030 struct req_que *req, uint32_t index)
1da177e4
LT
1031{
1032 srb_t *sp;
e315cd28 1033 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1034
1035 /* Validate handle. */
1036 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
1037 ql_log(ql_log_warn, vha, 0x3014,
1038 "Invalid SCSI command index (%x).\n", index);
1da177e4 1039
8f7daead
GM
1040 if (IS_QLA82XX(ha))
1041 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1042 else
1043 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1044 return;
1045 }
1046
e315cd28 1047 sp = req->outstanding_cmds[index];
1da177e4
LT
1048 if (sp) {
1049 /* Free outstanding command slot. */
e315cd28 1050 req->outstanding_cmds[index] = NULL;
1da177e4 1051
1da177e4 1052 /* Save ISP completion status */
9ba56b95 1053 sp->done(ha, sp, DID_OK << 16);
1da177e4 1054 } else {
7c3df132 1055 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1da177e4 1056
8f7daead
GM
1057 if (IS_QLA82XX(ha))
1058 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1059 else
1060 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1061 }
1062}
1063
ac280b67
AV
1064static srb_t *
1065qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1066 struct req_que *req, void *iocb)
1067{
1068 struct qla_hw_data *ha = vha->hw;
1069 sts_entry_t *pkt = iocb;
1070 srb_t *sp = NULL;
1071 uint16_t index;
1072
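	/* The low word of the IOCB handle indexes the outstanding-command array. */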
1073 index = LSW(pkt->handle);
1074 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
1075 ql_log(ql_log_warn, vha, 0x5031,
1076 "Invalid command index (%x).\n", index);
8f7daead
GM
1077 if (IS_QLA82XX(ha))
1078 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1079 else
1080 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ac280b67
AV
1081 goto done;
1082 }
1083 sp = req->outstanding_cmds[index];
1084 if (!sp) {
7c3df132
SK
1085 ql_log(ql_log_warn, vha, 0x5032,
1086 "Invalid completion handle (%x) -- timed-out.\n", index);
ac280b67
AV
1087 return sp;
1088 }
1089 if (sp->handle != index) {
7c3df132
SK
1090 ql_log(ql_log_warn, vha, 0x5033,
1091 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
ac280b67
AV
1092 return NULL;
1093 }
9a069e19 1094
ac280b67 1095 req->outstanding_cmds[index] = NULL;
9a069e19 1096
ac280b67
AV
1097done:
1098 return sp;
1099}
1100
1101static void
1102qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1103 struct mbx_entry *mbx)
1104{
1105 const char func[] = "MBX-IOCB";
1106 const char *type;
ac280b67
AV
1107 fc_port_t *fcport;
1108 srb_t *sp;
4916392b 1109 struct srb_iocb *lio;
99b0bec7 1110 uint16_t *data;
5ff1d584 1111 uint16_t status;
ac280b67
AV
1112
1113 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1114 if (!sp)
1115 return;
1116
9ba56b95
GM
1117 lio = &sp->u.iocb_cmd;
1118 type = sp->name;
ac280b67 1119 fcport = sp->fcport;
4916392b 1120 data = lio->u.logio.data;
ac280b67 1121
5ff1d584 1122 data[0] = MBS_COMMAND_ERROR;
4916392b 1123 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1124 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1125 if (mbx->entry_status) {
7c3df132 1126 ql_dbg(ql_dbg_async, vha, 0x5043,
cfb0919c 1127 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
d3fa9e7d 1128 "entry-status=%x status=%x state-flag=%x "
cfb0919c
CD
1129 "status-flags=%x.\n", type, sp->handle,
1130 fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d
AV
1131 fcport->d_id.b.al_pa, mbx->entry_status,
1132 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
7c3df132 1133 le16_to_cpu(mbx->status_flags));
d3fa9e7d 1134
cfb0919c 1135 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
7c3df132 1136 (uint8_t *)mbx, sizeof(*mbx));
ac280b67 1137
99b0bec7 1138 goto logio_done;
ac280b67
AV
1139 }
1140
5ff1d584 1141 status = le16_to_cpu(mbx->status);
9ba56b95 1142 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
5ff1d584
AV
1143 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1144 status = 0;
1145 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
7c3df132 1146 ql_dbg(ql_dbg_async, vha, 0x5045,
cfb0919c
CD
1147 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1148 type, sp->handle, fcport->d_id.b.domain,
1149 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1150 le16_to_cpu(mbx->mb1));
ac280b67
AV
1151
1152 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1153 if (sp->type == SRB_LOGIN_CMD) {
99b0bec7
AV
1154 fcport->port_type = FCT_TARGET;
1155 if (le16_to_cpu(mbx->mb1) & BIT_0)
1156 fcport->port_type = FCT_INITIATOR;
6ac52608 1157 else if (le16_to_cpu(mbx->mb1) & BIT_1)
99b0bec7 1158 fcport->flags |= FCF_FCP2_DEVICE;
5ff1d584 1159 }
99b0bec7 1160 goto logio_done;
ac280b67
AV
1161 }
1162
1163 data[0] = le16_to_cpu(mbx->mb0);
1164 switch (data[0]) {
1165 case MBS_PORT_ID_USED:
1166 data[1] = le16_to_cpu(mbx->mb1);
1167 break;
1168 case MBS_LOOP_ID_USED:
1169 break;
1170 default:
1171 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1172 break;
1173 }
1174
7c3df132 1175 ql_log(ql_log_warn, vha, 0x5046,
cfb0919c
CD
1176 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1177 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1178 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1179 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
ac280b67 1180 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
7c3df132 1181 le16_to_cpu(mbx->mb7));
ac280b67 1182
99b0bec7 1183logio_done:
9ba56b95 1184 sp->done(vha, sp, 0);
ac280b67
AV
1185}
1186
9bc4f4fb
HZ
1187static void
1188qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1189 sts_entry_t *pkt, int iocb_type)
1190{
1191 const char func[] = "CT_IOCB";
1192 const char *type;
9bc4f4fb 1193 srb_t *sp;
9bc4f4fb
HZ
1194 struct fc_bsg_job *bsg_job;
1195 uint16_t comp_status;
9ba56b95 1196 int res;
9bc4f4fb
HZ
1197
1198 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1199 if (!sp)
1200 return;
1201
9ba56b95 1202 bsg_job = sp->u.bsg_job;
9bc4f4fb 1203
9ba56b95 1204 type = "ct pass-through";
9bc4f4fb
HZ
1205
1206 comp_status = le16_to_cpu(pkt->comp_status);
1207
1208 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1209 * fc payload to the caller
1210 */
1211 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1212 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1213
1214 if (comp_status != CS_COMPLETE) {
1215 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1216 res = DID_OK << 16;
9bc4f4fb
HZ
1217 bsg_job->reply->reply_payload_rcv_len =
1218 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1219
7c3df132
SK
1220 ql_log(ql_log_warn, vha, 0x5048,
1221 "CT pass-through-%s error "
9bc4f4fb 1222 "comp_status-status=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1223 type, comp_status,
1224 bsg_job->reply->reply_payload_rcv_len);
9bc4f4fb 1225 } else {
7c3df132
SK
1226 ql_log(ql_log_warn, vha, 0x5049,
1227 "CT pass-through-%s error "
1228 "comp_status-status=0x%x.\n", type, comp_status);
9ba56b95 1229 res = DID_ERROR << 16;
9bc4f4fb
HZ
1230 bsg_job->reply->reply_payload_rcv_len = 0;
1231 }
cfb0919c 1232 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
7c3df132 1233 (uint8_t *)pkt, sizeof(*pkt));
9bc4f4fb 1234 } else {
9ba56b95 1235 res = DID_OK << 16;
9bc4f4fb
HZ
1236 bsg_job->reply->reply_payload_rcv_len =
1237 bsg_job->reply_payload.payload_len;
1238 bsg_job->reply_len = 0;
1239 }
1240
9ba56b95 1241 sp->done(vha, sp, res);
9bc4f4fb
HZ
1242}
1243
9a069e19
GM
1244static void
1245qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1246 struct sts_entry_24xx *pkt, int iocb_type)
1247{
1248 const char func[] = "ELS_CT_IOCB";
1249 const char *type;
9a069e19 1250 srb_t *sp;
9a069e19
GM
1251 struct fc_bsg_job *bsg_job;
1252 uint16_t comp_status;
1253 uint32_t fw_status[3];
1254 uint8_t* fw_sts_ptr;
9ba56b95 1255 int res;
9a069e19
GM
1256
1257 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1258 if (!sp)
1259 return;
9ba56b95 1260 bsg_job = sp->u.bsg_job;
9a069e19
GM
1261
1262 type = NULL;
9ba56b95 1263 switch (sp->type) {
9a069e19
GM
1264 case SRB_ELS_CMD_RPT:
1265 case SRB_ELS_CMD_HST:
1266 type = "els";
1267 break;
1268 case SRB_CT_CMD:
1269 type = "ct pass-through";
1270 break;
1271 default:
37fed3ee 1272 ql_dbg(ql_dbg_user, vha, 0x503e,
9ba56b95 1273 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
9a069e19
GM
1274 return;
1275 }
1276
1277 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1278 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1279 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1280
1281 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1282 * fc payload to the caller
1283 */
1284 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1285 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1286
1287 if (comp_status != CS_COMPLETE) {
1288 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1289 res = DID_OK << 16;
9a069e19 1290 bsg_job->reply->reply_payload_rcv_len =
9ba56b95 1291 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
9a069e19 1292
37fed3ee 1293 ql_dbg(ql_dbg_user, vha, 0x503f,
cfb0919c 1294 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1295 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
cfb0919c 1296 type, sp->handle, comp_status, fw_status[1], fw_status[2],
7c3df132
SK
1297 le16_to_cpu(((struct els_sts_entry_24xx *)
1298 pkt)->total_byte_count));
9a069e19
GM
1299 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1300 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1301 }
1302 else {
37fed3ee 1303 ql_dbg(ql_dbg_user, vha, 0x5040,
cfb0919c 1304 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1305 "error subcode 1=0x%x error subcode 2=0x%x.\n",
cfb0919c 1306 type, sp->handle, comp_status,
7c3df132
SK
1307 le16_to_cpu(((struct els_sts_entry_24xx *)
1308 pkt)->error_subcode_1),
1309 le16_to_cpu(((struct els_sts_entry_24xx *)
1310 pkt)->error_subcode_2));
9ba56b95 1311 res = DID_ERROR << 16;
9a069e19
GM
1312 bsg_job->reply->reply_payload_rcv_len = 0;
1313 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1314 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1315 }
37fed3ee 1316 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
7c3df132 1317 (uint8_t *)pkt, sizeof(*pkt));
9a069e19
GM
1318 }
1319 else {
9ba56b95 1320 res = DID_OK << 16;
9a069e19
GM
1321 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1322 bsg_job->reply_len = 0;
1323 }
1324
9ba56b95 1325 sp->done(vha, sp, res);
9a069e19
GM
1326}
1327
ac280b67
AV
1328static void
1329qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1330 struct logio_entry_24xx *logio)
1331{
1332 const char func[] = "LOGIO-IOCB";
1333 const char *type;
ac280b67
AV
1334 fc_port_t *fcport;
1335 srb_t *sp;
4916392b 1336 struct srb_iocb *lio;
99b0bec7 1337 uint16_t *data;
ac280b67
AV
1338 uint32_t iop[2];
1339
1340 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1341 if (!sp)
1342 return;
1343
9ba56b95
GM
1344 lio = &sp->u.iocb_cmd;
1345 type = sp->name;
ac280b67 1346 fcport = sp->fcport;
4916392b 1347 data = lio->u.logio.data;
ac280b67 1348
5ff1d584 1349 data[0] = MBS_COMMAND_ERROR;
4916392b 1350 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1351 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1352 if (logio->entry_status) {
5e19ed90 1353 ql_log(ql_log_warn, fcport->vha, 0x5034,
cfb0919c 1354 "Async-%s error entry - hdl=%x"
d3fa9e7d 1355 "portid=%02x%02x%02x entry-status=%x.\n",
cfb0919c
CD
1356 type, sp->handle, fcport->d_id.b.domain,
1357 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1358 logio->entry_status);
1359 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
7c3df132 1360 (uint8_t *)logio, sizeof(*logio));
ac280b67 1361
99b0bec7 1362 goto logio_done;
ac280b67
AV
1363 }
1364
1365 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
5e19ed90 1366 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
cfb0919c
CD
1367 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1368 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1369 fcport->d_id.b.area, fcport->d_id.b.al_pa,
7c3df132 1370 le32_to_cpu(logio->io_parameter[0]));
ac280b67
AV
1371
1372 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1373 if (sp->type != SRB_LOGIN_CMD)
99b0bec7 1374 goto logio_done;
ac280b67
AV
1375
1376 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1377 if (iop[0] & BIT_4) {
1378 fcport->port_type = FCT_TARGET;
1379 if (iop[0] & BIT_8)
8474f3a0 1380 fcport->flags |= FCF_FCP2_DEVICE;
b0cd579c 1381 } else if (iop[0] & BIT_5)
ac280b67 1382 fcport->port_type = FCT_INITIATOR;
b0cd579c 1383
2d70c103
NB
1384 if (iop[0] & BIT_7)
1385 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1386
ac280b67
AV
1387 if (logio->io_parameter[7] || logio->io_parameter[8])
1388 fcport->supported_classes |= FC_COS_CLASS2;
1389 if (logio->io_parameter[9] || logio->io_parameter[10])
1390 fcport->supported_classes |= FC_COS_CLASS3;
1391
99b0bec7 1392 goto logio_done;
ac280b67
AV
1393 }
1394
1395 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1396 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1397 switch (iop[0]) {
1398 case LSC_SCODE_PORTID_USED:
1399 data[0] = MBS_PORT_ID_USED;
1400 data[1] = LSW(iop[1]);
1401 break;
1402 case LSC_SCODE_NPORT_USED:
1403 data[0] = MBS_LOOP_ID_USED;
1404 break;
ac280b67
AV
1405 default:
1406 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1407 break;
1408 }
1409
5e19ed90 1410 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
cfb0919c
CD
1411 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1412 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
d3fa9e7d 1413 fcport->d_id.b.area, fcport->d_id.b.al_pa,
ac280b67
AV
1414 le16_to_cpu(logio->comp_status),
1415 le32_to_cpu(logio->io_parameter[0]),
7c3df132 1416 le32_to_cpu(logio->io_parameter[1]));
ac280b67 1417
99b0bec7 1418logio_done:
9ba56b95 1419 sp->done(vha, sp, 0);
ac280b67
AV
1420}
1421
3822263e
MI
1422static void
1423qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1424 struct tsk_mgmt_entry *tsk)
1425{
1426 const char func[] = "TMF-IOCB";
1427 const char *type;
1428 fc_port_t *fcport;
1429 srb_t *sp;
1430 struct srb_iocb *iocb;
3822263e
MI
1431 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1432 int error = 1;
1433
1434 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1435 if (!sp)
1436 return;
1437
9ba56b95
GM
1438 iocb = &sp->u.iocb_cmd;
1439 type = sp->name;
3822263e
MI
1440 fcport = sp->fcport;
1441
1442 if (sts->entry_status) {
5e19ed90 1443 ql_log(ql_log_warn, fcport->vha, 0x5038,
cfb0919c
CD
1444 "Async-%s error - hdl=%x entry-status(%x).\n",
1445 type, sp->handle, sts->entry_status);
3822263e 1446 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
5e19ed90 1447 ql_log(ql_log_warn, fcport->vha, 0x5039,
cfb0919c
CD
1448 "Async-%s error - hdl=%x completion status(%x).\n",
1449 type, sp->handle, sts->comp_status);
3822263e
MI
1450 } else if (!(le16_to_cpu(sts->scsi_status) &
1451 SS_RESPONSE_INFO_LEN_VALID)) {
5e19ed90 1452 ql_log(ql_log_warn, fcport->vha, 0x503a,
cfb0919c
CD
1453 "Async-%s error - hdl=%x no response info(%x).\n",
1454 type, sp->handle, sts->scsi_status);
3822263e 1455 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
5e19ed90 1456 ql_log(ql_log_warn, fcport->vha, 0x503b,
cfb0919c
CD
1457 "Async-%s error - hdl=%x not enough response(%d).\n",
1458 type, sp->handle, sts->rsp_data_len);
3822263e 1459 } else if (sts->data[3]) {
5e19ed90 1460 ql_log(ql_log_warn, fcport->vha, 0x503c,
cfb0919c
CD
1461 "Async-%s error - hdl=%x response(%x).\n",
1462 type, sp->handle, sts->data[3]);
3822263e
MI
1463 } else {
1464 error = 0;
1465 }
1466
1467 if (error) {
1468 iocb->u.tmf.data = error;
7c3df132
SK
1469 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1470 (uint8_t *)sts, sizeof(*sts));
3822263e
MI
1471 }
1472
9ba56b95 1473 sp->done(vha, sp, 0);
3822263e
MI
1474}
1475
1da177e4
LT
1476/**
1477 * qla2x00_process_response_queue() - Process response queue entries.
1478 * @rsp: response queue
1479 */
1480void
73208dfd 1481qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 1482{
73208dfd
AC
1483 struct scsi_qla_host *vha;
1484 struct qla_hw_data *ha = rsp->hw;
3d71644c 1485 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1486 sts_entry_t *pkt;
1487 uint16_t handle_cnt;
1488 uint16_t cnt;
73208dfd 1489
2afa19a9 1490 vha = pci_get_drvdata(ha->pdev);
1da177e4 1491
e315cd28 1492 if (!vha->flags.online)
1da177e4
LT
1493 return;
1494
e315cd28
AC
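	/* Walk the response ring until an already-processed entry is found. */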
1495 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1496 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 1497
e315cd28
AC
1498 rsp->ring_index++;
1499 if (rsp->ring_index == rsp->length) {
1500 rsp->ring_index = 0;
1501 rsp->ring_ptr = rsp->ring;
1da177e4 1502 } else {
e315cd28 1503 rsp->ring_ptr++;
1da177e4
LT
1504 }
1505
1506 if (pkt->entry_status != 0) {
73208dfd 1507 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
1508 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1509 wmb();
1510 continue;
1511 }
1512
1513 switch (pkt->entry_type) {
1514 case STATUS_TYPE:
73208dfd 1515 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
1516 break;
1517 case STATUS_TYPE_21:
1518 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1519 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1520 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1521 ((sts21_entry_t *)pkt)->handle[cnt]);
1522 }
1523 break;
1524 case STATUS_TYPE_22:
1525 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1526 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1527 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1528 ((sts22_entry_t *)pkt)->handle[cnt]);
1529 }
1530 break;
1531 case STATUS_CONT_TYPE:
2afa19a9 1532 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1da177e4 1533 break;
ac280b67
AV
1534 case MBX_IOCB_TYPE:
1535 qla2x00_mbx_iocb_entry(vha, rsp->req,
1536 (struct mbx_entry *)pkt);
3822263e 1537 break;
9bc4f4fb
HZ
1538 case CT_IOCB_TYPE:
1539 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1540 break;
1da177e4
LT
1541 default:
1542 /* Type Not Supported. */
7c3df132
SK
1543 ql_log(ql_log_warn, vha, 0x504a,
1544 "Received unknown response pkt type %x "
1da177e4 1545 "entry status=%x.\n",
7c3df132 1546 pkt->entry_type, pkt->entry_status);
1da177e4
LT
1547 break;
1548 }
1549 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1550 wmb();
1551 }
1552
1553 /* Adjust ring index */
e315cd28 1554 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
1555}
1556
4733fcb1 1557static inline void
5544213b 1558qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
9ba56b95 1559 uint32_t sense_len, struct rsp_que *rsp, int res)
4733fcb1 1560{
7c3df132 1561 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95
GM
1562 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1563 uint32_t track_sense_len;
4733fcb1
AV
1564
1565 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1566 sense_len = SCSI_SENSE_BUFFERSIZE;
1567
9ba56b95
GM
1568 SET_CMD_SENSE_LEN(sp, sense_len);
1569 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1570 track_sense_len = sense_len;
1571
1572 if (sense_len > par_sense_len)
5544213b 1573 sense_len = par_sense_len;
4733fcb1
AV
1574
1575 memcpy(cp->sense_buffer, sense_data, sense_len);
1576
9ba56b95
GM
1577 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1578 track_sense_len -= sense_len;
1579 SET_CMD_SENSE_LEN(sp, track_sense_len);
1580
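	/*
	 * Sense data that did not fit in this IOCB arrives in status
	 * continuation entries; remember the SRB so they can be appended.
	 */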
1581 if (track_sense_len != 0) {
2afa19a9 1582 rsp->status_srb = sp;
9ba56b95
GM
1583 cp->result = res;
1584 }
4733fcb1 1585
cfb0919c
CD
1586 if (sense_len) {
1587 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1588 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1589 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1590 cp);
7c3df132
SK
1591 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1592 cp->sense_buffer, sense_len);
cfb0919c 1593 }
4733fcb1
AV
1594}
1595
bad75002
AE
1596struct scsi_dif_tuple {
1597 __be16 guard; /* Checksum */
d6a03581 1598 __be16 app_tag; /* APPL identifier */
bad75002
AE
1599 __be32 ref_tag; /* Target LBA or indirect LBA */
1600};
1601
1602/*
1603 * Checks the guard or meta-data for the type of error
1604 * detected by the HBA. In case of errors, we set the
1605 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1606 * to indicate to the kernel that the HBA detected an error.
1607 */
8cb2049c 1608static inline int
bad75002
AE
1609qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1610{
7c3df132 1611 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95 1612 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
8cb2049c
AE
1613 uint8_t *ap = &sts24->data[12];
1614 uint8_t *ep = &sts24->data[20];
bad75002
AE
1615 uint32_t e_ref_tag, a_ref_tag;
1616 uint16_t e_app_tag, a_app_tag;
1617 uint16_t e_guard, a_guard;
1618
8cb2049c
AE
1619 /*
1620 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1621 * would make guard field appear at offset 2
1622 */
1623 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1624 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1625 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1626 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1627 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1628 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
bad75002 1629
7c3df132
SK
1630 ql_dbg(ql_dbg_io, vha, 0x3023,
1631 "iocb(s) %p Returned STATUS.\n", sts24);
bad75002 1632
7c3df132
SK
1633 ql_dbg(ql_dbg_io, vha, 0x3024,
1634 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
bad75002 1635 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
7c3df132 1636 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
bad75002 1637 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
7c3df132 1638 a_app_tag, e_app_tag, a_guard, e_guard);
bad75002 1639
8cb2049c
AE
1640 /*
1641 * Ignore sector if:
1642 * For type 3: ref & app tag is all 'f's
1643 * For type 0,1,2: app tag is all 'f's
1644 */
1645 if ((a_app_tag == 0xffff) &&
1646 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1647 (a_ref_tag == 0xffffffff))) {
1648 uint32_t blocks_done, resid;
1649 sector_t lba_s = scsi_get_lba(cmd);
1650
1651 /* 2TB boundary case covered automatically with this */
1652 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1653
1654 resid = scsi_bufflen(cmd) - (blocks_done *
1655 cmd->device->sector_size);
1656
1657 scsi_set_resid(cmd, resid);
1658 cmd->result = DID_OK << 16;
1659
1660 /* Update protection tag */
1661 if (scsi_prot_sg_count(cmd)) {
1662 uint32_t i, j = 0, k = 0, num_ent;
1663 struct scatterlist *sg;
1664 struct sd_dif_tuple *spt;
1665
1666 /* Patch the corresponding protection tags */
1667 scsi_for_each_prot_sg(cmd, sg,
1668 scsi_prot_sg_count(cmd), i) {
1669 num_ent = sg_dma_len(sg) / 8;
1670 if (k + num_ent < blocks_done) {
1671 k += num_ent;
1672 continue;
1673 }
1674 j = blocks_done - k - 1;
1675 k = blocks_done;
1676 break;
1677 }
1678
1679 if (k != blocks_done) {
cfb0919c 1680 ql_log(ql_log_warn, vha, 0x302f,
8ec9c7fb
RD
1681 "unexpected tag values tag:lba=%x:%llx)\n",
1682 e_ref_tag, (unsigned long long)lba_s);
8cb2049c
AE
1683 return 1;
1684 }
1685
1686 spt = page_address(sg_page(sg)) + sg->offset;
1687 spt += j;
1688
1689 spt->app_tag = 0xffff;
1690 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1691 spt->ref_tag = 0xffffffff;
1692 }
1693
1694 return 0;
1695 }
1696
bad75002
AE
1697 /* check guard */
1698 if (e_guard != a_guard) {
1699 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1700 0x10, 0x1);
1701 set_driver_byte(cmd, DRIVER_SENSE);
1702 set_host_byte(cmd, DID_ABORT);
1703 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1704 return 1;
bad75002
AE
1705 }
1706
e02587d7
AE
1707 /* check ref tag */
1708 if (e_ref_tag != a_ref_tag) {
bad75002 1709 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1710 0x10, 0x3);
bad75002
AE
1711 set_driver_byte(cmd, DRIVER_SENSE);
1712 set_host_byte(cmd, DID_ABORT);
1713 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1714 return 1;
bad75002
AE
1715 }
1716
e02587d7
AE
1717 /* check appl tag */
1718 if (e_app_tag != a_app_tag) {
bad75002 1719 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1720 0x10, 0x2);
bad75002
AE
1721 set_driver_byte(cmd, DRIVER_SENSE);
1722 set_host_byte(cmd, DID_ABORT);
1723 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1724 return 1;
bad75002 1725 }
e02587d7 1726
8cb2049c 1727 return 1;
bad75002
AE
1728}
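/*
 * Editor's sketch (not driver code): the residual arithmetic used in the
 * "ignore sector" path above.  The expected ref tag of the failing sector is
 * the low 32 bits of its LBA, so subtracting the low 32 bits of the starting
 * LBA (unsigned 32-bit wraparound is what makes the 2TB boundary work without
 * special casing) and adding one yields the number of blocks the target
 * actually covered.  The ex_* names are hypothetical.
 */
static inline uint32_t ex_dif_blocks_done(uint32_t e_ref_tag, uint64_t lba)
{
	return e_ref_tag - (uint32_t)lba + 1;
}

static inline uint32_t ex_dif_residual(uint32_t bufflen, uint32_t blocks_done,
    uint32_t sector_size)
{
	/* Bytes of the request that were not transferred. */
	return bufflen - blocks_done * sector_size;
}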
1729
a9b6f722
SK
1730static void
1731qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1732 struct req_que *req, uint32_t index)
1733{
1734 struct qla_hw_data *ha = vha->hw;
1735 srb_t *sp;
1736 uint16_t comp_status;
1737 uint16_t scsi_status;
1738 uint16_t thread_id;
1739 uint32_t rval = EXT_STATUS_OK;
1740 struct fc_bsg_job *bsg_job = NULL;
1741 sts_entry_t *sts;
1742 struct sts_entry_24xx *sts24;
1743 sts = (sts_entry_t *) pkt;
1744 sts24 = (struct sts_entry_24xx *) pkt;
1745
1746 /* Validate handle. */
1747 if (index >= MAX_OUTSTANDING_COMMANDS) {
1748 ql_log(ql_log_warn, vha, 0x70af,
1749 "Invalid SCSI completion handle 0x%x.\n", index);
1750 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1751 return;
1752 }
1753
1754 sp = req->outstanding_cmds[index];
1755 if (sp) {
1756 /* Free outstanding command slot. */
1757 req->outstanding_cmds[index] = NULL;
1758 bsg_job = sp->u.bsg_job;
1759 } else {
1760 ql_log(ql_log_warn, vha, 0x70b0,
1761 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1762 req->id, index);
1763
1764 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1765 return;
1766 }
1767
1768 if (IS_FWI2_CAPABLE(ha)) {
1769 comp_status = le16_to_cpu(sts24->comp_status);
1770 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1771 } else {
1772 comp_status = le16_to_cpu(sts->comp_status);
1773 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1774 }
1775
1776 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1777 switch (comp_status) {
1778 case CS_COMPLETE:
1779 if (scsi_status == 0) {
1780 bsg_job->reply->reply_payload_rcv_len =
1781 bsg_job->reply_payload.payload_len;
1782 rval = EXT_STATUS_OK;
1783 }
1784 goto done;
1785
1786 case CS_DATA_OVERRUN:
1787 ql_dbg(ql_dbg_user, vha, 0x70b1,
1788 "Command completed with date overrun thread_id=%d\n",
1789 thread_id);
1790 rval = EXT_STATUS_DATA_OVERRUN;
1791 break;
1792
1793 case CS_DATA_UNDERRUN:
1794 ql_dbg(ql_dbg_user, vha, 0x70b2,
1795 "Command completed with date underrun thread_id=%d\n",
1796 thread_id);
1797 rval = EXT_STATUS_DATA_UNDERRUN;
1798 break;
1799 case CS_BIDIR_RD_OVERRUN:
1800 ql_dbg(ql_dbg_user, vha, 0x70b3,
1801 "Command completed with read data overrun thread_id=%d\n",
1802 thread_id);
1803 rval = EXT_STATUS_DATA_OVERRUN;
1804 break;
1805
1806 case CS_BIDIR_RD_WR_OVERRUN:
1807 ql_dbg(ql_dbg_user, vha, 0x70b4,
1808 "Command completed with read and write data overrun "
1809 "thread_id=%d\n", thread_id);
1810 rval = EXT_STATUS_DATA_OVERRUN;
1811 break;
1812
1813 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1814 ql_dbg(ql_dbg_user, vha, 0x70b5,
1815 "Command completed with read data over and write data "
1816 "underrun thread_id=%d\n", thread_id);
1817 rval = EXT_STATUS_DATA_OVERRUN;
1818 break;
1819
1820 case CS_BIDIR_RD_UNDERRUN:
1821 ql_dbg(ql_dbg_user, vha, 0x70b6,
1822 "Command completed with read data data underrun "
1823 "thread_id=%d\n", thread_id);
1824 rval = EXT_STATUS_DATA_UNDERRUN;
1825 break;
1826
1827 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1828 ql_dbg(ql_dbg_user, vha, 0x70b7,
1829 "Command completed with read data under and write data "
1830 "overrun thread_id=%d\n", thread_id);
1831 rval = EXT_STATUS_DATA_UNDERRUN;
1832 break;
1833
1834 case CS_BIDIR_RD_WR_UNDERRUN:
1835 ql_dbg(ql_dbg_user, vha, 0x70b8,
1836 "Command completed with read and write data underrun "
1837 "thread_id=%d\n", thread_id);
1838 rval = EXT_STATUS_DATA_UNDERRUN;
1839 break;
1840
1841 case CS_BIDIR_DMA:
1842 ql_dbg(ql_dbg_user, vha, 0x70b9,
1843 "Command completed with data DMA error thread_id=%d\n",
1844 thread_id);
1845 rval = EXT_STATUS_DMA_ERR;
1846 break;
1847
1848 case CS_TIMEOUT:
1849 ql_dbg(ql_dbg_user, vha, 0x70ba,
1850 "Command completed with timeout thread_id=%d\n",
1851 thread_id);
1852 rval = EXT_STATUS_TIMEOUT;
1853 break;
1854 default:
1855 ql_dbg(ql_dbg_user, vha, 0x70bb,
1856 "Command completed with completion status=0x%x "
1857 "thread_id=%d\n", comp_status, thread_id);
1858 rval = EXT_STATUS_ERR;
1859 break;
1860 }
1861 bsg_job->reply->reply_payload_rcv_len = 0;
1862
1863done:
1864 /* Return the vendor specific reply to API */
1865 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1866 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1867 /* Always return DID_OK; bsg will send the vendor-specific response
1868 * in this case only. */
1869 sp->done(vha, sp, (DID_OK << 16));
1870
1871}
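/*
 * Editor's sketch (not driver code): the completion-status switch above
 * condensed into its outcome groups, reusing the CS_* and EXT_STATUS_*
 * constants already referenced in this file.  Note that the real path maps
 * CS_COMPLETE to EXT_STATUS_OK only when scsi_status is also zero; the ex_*
 * name is hypothetical and the per-case logging is omitted here.
 */
static inline uint32_t ex_bidir_comp_to_ext_status(uint16_t comp_status)
{
	switch (comp_status) {
	case CS_COMPLETE:
		return EXT_STATUS_OK;
	case CS_DATA_OVERRUN:
	case CS_BIDIR_RD_OVERRUN:
	case CS_BIDIR_RD_WR_OVERRUN:
	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		return EXT_STATUS_DATA_OVERRUN;
	case CS_DATA_UNDERRUN:
	case CS_BIDIR_RD_UNDERRUN:
	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
	case CS_BIDIR_RD_WR_UNDERRUN:
		return EXT_STATUS_DATA_UNDERRUN;
	case CS_BIDIR_DMA:
		return EXT_STATUS_DMA_ERR;
	case CS_TIMEOUT:
		return EXT_STATUS_TIMEOUT;
	default:
		return EXT_STATUS_ERR;
	}
}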
1872
1da177e4
LT
1873/**
1874 * qla2x00_status_entry() - Process a Status IOCB entry.
1875 * @vha: SCSI driver HA context
1876 * @pkt: Entry pointer
1877 */
1878static void
73208dfd 1879qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1880{
1da177e4 1881 srb_t *sp;
1da177e4
LT
1882 fc_port_t *fcport;
1883 struct scsi_cmnd *cp;
9a853f71
AV
1884 sts_entry_t *sts;
1885 struct sts_entry_24xx *sts24;
1da177e4
LT
1886 uint16_t comp_status;
1887 uint16_t scsi_status;
b7d2280c 1888 uint16_t ox_id;
1da177e4
LT
1889 uint8_t lscsi_status;
1890 int32_t resid;
5544213b
AV
1891 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1892 fw_resid_len;
9a853f71 1893 uint8_t *rsp_info, *sense_data;
e315cd28 1894 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1895 uint32_t handle;
1896 uint16_t que;
1897 struct req_que *req;
b7d2280c 1898 int logit = 1;
9ba56b95 1899 int res = 0;
a9b6f722 1900 uint16_t state_flags = 0;
9a853f71
AV
1901
1902 sts = (sts_entry_t *) pkt;
1903 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1904 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1905 comp_status = le16_to_cpu(sts24->comp_status);
1906 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
a9b6f722 1907 state_flags = le16_to_cpu(sts24->state_flags);
9a853f71
AV
1908 } else {
1909 comp_status = le16_to_cpu(sts->comp_status);
1910 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1911 }
2afa19a9
AC
1912 handle = (uint32_t) LSW(sts->handle);
1913 que = MSW(sts->handle);
1914 req = ha->req_q_map[que];
a9083016 1915
1da177e4 1916 /* Validate handle. */
2afa19a9
AC
1917 if (handle < MAX_OUTSTANDING_COMMANDS) {
1918 sp = req->outstanding_cmds[handle];
1da177e4
LT
1919 } else
1920 sp = NULL;
1921
1922 if (sp == NULL) {
cfb0919c 1923 ql_dbg(ql_dbg_io, vha, 0x3017,
7c3df132 1924 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1925
8f7daead
GM
1926 if (IS_QLA82XX(ha))
1927 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1928 else
1929 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1930 qla2xxx_wake_dpc(vha);
1da177e4
LT
1931 return;
1932 }
a9b6f722
SK
1933
1934 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1935 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1936 return;
1937 }
1938
1939 /* Fast path completion. */
1940 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1941 qla2x00_process_completed_request(vha, req, handle);
1942
1943 return;
1944 }
1945
1946 req->outstanding_cmds[handle] = NULL;
9ba56b95 1947 cp = GET_CMD_SP(sp);
1da177e4 1948 if (cp == NULL) {
cfb0919c 1949 ql_dbg(ql_dbg_io, vha, 0x3018,
7c3df132
SK
1950 "Command already returned (0x%x/%p).\n",
1951 sts->handle, sp);
1da177e4
LT
1952
1953 return;
1954 }
1955
9a853f71 1956 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1957
bdf79621 1958 fcport = sp->fcport;
1da177e4 1959
b7d2280c 1960 ox_id = 0;
5544213b
AV
1961 sense_len = par_sense_len = rsp_info_len = resid_len =
1962 fw_resid_len = 0;
e428924c 1963 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
1964 if (scsi_status & SS_SENSE_LEN_VALID)
1965 sense_len = le32_to_cpu(sts24->sense_len);
1966 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1967 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1968 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1969 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1970 if (comp_status == CS_DATA_UNDERRUN)
1971 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1972 rsp_info = sts24->data;
1973 sense_data = sts24->data;
1974 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 1975 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 1976 par_sense_len = sizeof(sts24->data);
9a853f71 1977 } else {
0f00a206
LC
1978 if (scsi_status & SS_SENSE_LEN_VALID)
1979 sense_len = le16_to_cpu(sts->req_sense_length);
1980 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1981 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
1982 resid_len = le32_to_cpu(sts->residual_length);
1983 rsp_info = sts->rsp_info;
1984 sense_data = sts->req_sense_data;
5544213b 1985 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
1986 }
1987
1da177e4
LT
1988 /* Check for any FCP transport errors. */
1989 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1990 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 1991 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 1992 sense_data += rsp_info_len;
5544213b
AV
1993 par_sense_len -= rsp_info_len;
1994 }
9a853f71 1995 if (rsp_info_len > 3 && rsp_info[3]) {
5e19ed90 1996 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
7c3df132
SK
1997 "FCP I/O protocol failure (0x%x/0x%x).\n",
1998 rsp_info_len, rsp_info[3]);
1da177e4 1999
9ba56b95 2000 res = DID_BUS_BUSY << 16;
b7d2280c 2001 goto out;
1da177e4
LT
2002 }
2003 }
2004
3e8ce320
AV
2005 /* Check for overrun. */
2006 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2007 scsi_status & SS_RESIDUAL_OVER)
2008 comp_status = CS_DATA_OVERRUN;
2009
1da177e4
LT
2010 /*
2011 * Based on Host and scsi status generate status code for Linux
2012 */
2013 switch (comp_status) {
2014 case CS_COMPLETE:
df7baa50 2015 case CS_QUEUE_FULL:
1da177e4 2016 if (scsi_status == 0) {
9ba56b95 2017 res = DID_OK << 16;
1da177e4
LT
2018 break;
2019 }
2020 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 2021 resid = resid_len;
385d70b4 2022 scsi_set_resid(cp, resid);
0da69df1
AV
2023
2024 if (!lscsi_status &&
385d70b4 2025 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 2026 cp->underflow)) {
5e19ed90 2027 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
7c3df132 2028 "Mid-layer underflow "
b7d2280c 2029 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2030 resid, scsi_bufflen(cp));
0da69df1 2031
9ba56b95 2032 res = DID_ERROR << 16;
0da69df1
AV
2033 break;
2034 }
1da177e4 2035 }
9ba56b95 2036 res = DID_OK << 16 | lscsi_status;
1da177e4 2037
df7baa50 2038 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2039 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
7c3df132 2040 "QUEUE FULL detected.\n");
df7baa50
AV
2041 break;
2042 }
b7d2280c 2043 logit = 0;
1da177e4
LT
2044 if (lscsi_status != SS_CHECK_CONDITION)
2045 break;
2046
b80ca4f7 2047 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2048 if (!(scsi_status & SS_SENSE_LEN_VALID))
2049 break;
2050
5544213b 2051 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
9ba56b95 2052 rsp, res);
1da177e4
LT
2053 break;
2054
2055 case CS_DATA_UNDERRUN:
ed17c71b 2056 /* Use F/W calculated residual length. */
0f00a206
LC
2057 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2058 scsi_set_resid(cp, resid);
2059 if (scsi_status & SS_RESIDUAL_UNDER) {
2060 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
5e19ed90 2061 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
7c3df132
SK
2062 "Dropped frame(s) detected "
2063 "(0x%x of 0x%x bytes).\n",
2064 resid, scsi_bufflen(cp));
0f00a206 2065
9ba56b95 2066 res = DID_ERROR << 16 | lscsi_status;
4e85e3d9 2067 goto check_scsi_status;
6acf8190 2068 }
ed17c71b 2069
0f00a206
LC
2070 if (!lscsi_status &&
2071 ((unsigned)(scsi_bufflen(cp) - resid) <
2072 cp->underflow)) {
5e19ed90 2073 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
7c3df132 2074 "Mid-layer underflow "
b7d2280c 2075 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2076 resid, scsi_bufflen(cp));
e038a1be 2077
9ba56b95 2078 res = DID_ERROR << 16;
0f00a206
LC
2079 break;
2080 }
4aee5766
GM
2081 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2082 lscsi_status != SAM_STAT_BUSY) {
2083 /*
2084 * A SCSI status of TASK SET FULL or BUSY means the task
2085 * did not complete.
2086 */
2087
5e19ed90 2088 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
7c3df132 2089 "Dropped frame(s) detected (0x%x "
4aee5766
GM
2090 "of 0x%x bytes).\n", resid,
2091 scsi_bufflen(cp));
0f00a206 2092
9ba56b95 2093 res = DID_ERROR << 16 | lscsi_status;
0374f55e 2094 goto check_scsi_status;
4aee5766
GM
2095 } else {
2096 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2097 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2098 scsi_status, lscsi_status);
1da177e4
LT
2099 }
2100
9ba56b95 2101 res = DID_OK << 16 | lscsi_status;
b7d2280c 2102 logit = 0;
0f00a206 2103
0374f55e 2104check_scsi_status:
1da177e4 2105 /*
fa2a1ce5 2106 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
2107 * Status.
2108 */
2109 if (lscsi_status != 0) {
ffec28a3 2110 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2111 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
7c3df132 2112 "QUEUE FULL detected.\n");
b7d2280c 2113 logit = 1;
ffec28a3
AV
2114 break;
2115 }
1da177e4
LT
2116 if (lscsi_status != SS_CHECK_CONDITION)
2117 break;
2118
b80ca4f7 2119 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2120 if (!(scsi_status & SS_SENSE_LEN_VALID))
2121 break;
2122
5544213b 2123 qla2x00_handle_sense(sp, sense_data, par_sense_len,
9ba56b95 2124 sense_len, rsp, res);
1da177e4
LT
2125 }
2126 break;
2127
1da177e4
LT
2128 case CS_PORT_LOGGED_OUT:
2129 case CS_PORT_CONFIG_CHG:
2130 case CS_PORT_BUSY:
2131 case CS_INCOMPLETE:
2132 case CS_PORT_UNAVAILABLE:
b7d2280c 2133 case CS_TIMEOUT:
ff454b01
CD
2134 case CS_RESET:
2135
056a4483
MC
2136 /*
2137 * We are going to have the fc class block the rport
2138 * while we try to recover, so instruct the mid-layer
2139 * to requeue until the class decides how to handle this.
2140 */
9ba56b95 2141 res = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
2142
2143 if (comp_status == CS_TIMEOUT) {
2144 if (IS_FWI2_CAPABLE(ha))
2145 break;
2146 else if ((le16_to_cpu(sts->status_flags) &
2147 SF_LOGOUT_SENT) == 0)
2148 break;
2149 }
2150
5e19ed90 2151 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
7c3df132
SK
2152 "Port down status: port-state=0x%x.\n",
2153 atomic_read(&fcport->state));
b7d2280c 2154
a7a28504 2155 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 2156 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
2157 break;
2158
1da177e4 2159 case CS_ABORTED:
9ba56b95 2160 res = DID_RESET << 16;
1da177e4 2161 break;
bad75002
AE
2162
2163 case CS_DIF_ERROR:
8cb2049c 2164 logit = qla2x00_handle_dif_error(sp, sts24);
fb6e4668 2165 res = cp->result;
bad75002 2166 break;
9e522cd8
AE
2167
2168 case CS_TRANSPORT:
2169 res = DID_ERROR << 16;
2170
2171 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2172 break;
2173
2174 if (state_flags & BIT_4)
2175 scmd_printk(KERN_WARNING, cp,
2176 "Unsupported device '%s' found.\n",
2177 cp->device->vendor);
2178 break;
2179
1da177e4 2180 default:
9ba56b95 2181 res = DID_ERROR << 16;
1da177e4
LT
2182 break;
2183 }
2184
b7d2280c
AV
2185out:
2186 if (logit)
5e19ed90 2187 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
7c3df132 2188 "FCP command status: 0x%x-0x%x (0x%x) "
cfb0919c
CD
2189 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2190 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
7c3df132 2191 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
9ba56b95 2192 comp_status, scsi_status, res, vha->host_no,
cfb0919c
CD
2193 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2194 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2195 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2196 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2197 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
7c3df132 2198 resid_len, fw_resid_len);
b7d2280c 2199
2afa19a9 2200 if (rsp->status_srb == NULL)
9ba56b95 2201 sp->done(ha, sp, res);
1da177e4
LT
2202}
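/*
 * Editor's sketch (not driver code): how the 32-bit status handle is unpacked
 * above.  The low word indexes req->outstanding_cmds[] and the high word
 * selects the request queue in ha->req_q_map[].  The ex_* names are
 * hypothetical.
 */
struct ex_sts_handle {
	uint16_t que;	/* request queue number (MSW of sts->handle) */
	uint16_t index;	/* outstanding command slot (LSW of sts->handle) */
};

static inline struct ex_sts_handle ex_unpack_sts_handle(uint32_t handle)
{
	struct ex_sts_handle h;

	h.index = handle & 0xffff;
	h.que = (handle >> 16) & 0xffff;
	return h;
}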
2203
2204/**
2205 * qla2x00_status_cont_entry() - Process a Status Continuation entry.
2206 * @rsp: response queue
2207 * @pkt: Entry pointer
2208 *
2209 * Extended sense data.
2210 */
2211static void
2afa19a9 2212qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4 2213{
9ba56b95 2214 uint8_t sense_sz = 0;
2afa19a9 2215 struct qla_hw_data *ha = rsp->hw;
7c3df132 2216 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
9ba56b95 2217 srb_t *sp = rsp->status_srb;
1da177e4 2218 struct scsi_cmnd *cp;
9ba56b95
GM
2219 uint32_t sense_len;
2220 uint8_t *sense_ptr;
1da177e4 2221
9ba56b95
GM
2222 if (!sp || !GET_CMD_SENSE_LEN(sp))
2223 return;
1da177e4 2224
9ba56b95
GM
2225 sense_len = GET_CMD_SENSE_LEN(sp);
2226 sense_ptr = GET_CMD_SENSE_PTR(sp);
1da177e4 2227
9ba56b95
GM
2228 cp = GET_CMD_SP(sp);
2229 if (cp == NULL) {
2230 ql_log(ql_log_warn, vha, 0x3025,
2231 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1da177e4 2232
9ba56b95
GM
2233 rsp->status_srb = NULL;
2234 return;
1da177e4 2235 }
1da177e4 2236
9ba56b95
GM
2237 if (sense_len > sizeof(pkt->data))
2238 sense_sz = sizeof(pkt->data);
2239 else
2240 sense_sz = sense_len;
c4631191 2241
9ba56b95
GM
2242 /* Move sense data. */
2243 if (IS_FWI2_CAPABLE(ha))
2244 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2245 memcpy(sense_ptr, pkt->data, sense_sz);
2246 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2247 sense_ptr, sense_sz);
c4631191 2248
9ba56b95
GM
2249 sense_len -= sense_sz;
2250 sense_ptr += sense_sz;
c4631191 2251
9ba56b95
GM
2252 SET_CMD_SENSE_PTR(sp, sense_ptr);
2253 SET_CMD_SENSE_LEN(sp, sense_len);
2254
2255 /* Place command on done queue. */
2256 if (sense_len == 0) {
2257 rsp->status_srb = NULL;
2258 sp->done(ha, sp, cp->result);
c4631191 2259 }
c4631191
GM
2260}
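/*
 * Editor's sketch (not driver code): the chunking performed by the
 * continuation handler above.  Each sts_cont_entry_t carries at most
 * sizeof(pkt->data) bytes of sense data, so the remaining length recorded by
 * qla2x00_handle_sense() is drained one packet at a time until it reaches
 * zero, at which point the command is completed.  The ex_* names and the
 * explicit chunk-size parameter are hypothetical.
 */
static inline uint32_t ex_sense_chunk(uint32_t remaining, uint32_t pkt_data_sz)
{
	/* Bytes consumed from one continuation packet. */
	return remaining > pkt_data_sz ? pkt_data_sz : remaining;
}

static inline uint32_t ex_cont_packets_needed(uint32_t remaining,
    uint32_t pkt_data_sz)
{
	/* Continuation entries still required to deliver the remainder. */
	return pkt_data_sz ? (remaining + pkt_data_sz - 1) / pkt_data_sz : 0;
}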
2261
1da177e4
LT
2262/**
2263 * qla2x00_error_entry() - Process an error entry.
2264 * @vha: SCSI driver HA context
2265 * @pkt: Entry pointer
2266 */
2267static void
73208dfd 2268qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
2269{
2270 srb_t *sp;
e315cd28 2271 struct qla_hw_data *ha = vha->hw;
c4631191 2272 const char func[] = "ERROR-IOCB";
2afa19a9 2273 uint16_t que = MSW(pkt->handle);
a6fe35c0 2274 struct req_que *req = NULL;
9ba56b95 2275 int res = DID_ERROR << 16;
7c3df132 2276
9ba56b95
GM
2277 ql_dbg(ql_dbg_async, vha, 0x502a,
2278 "type of error status in response: 0x%x\n", pkt->entry_status);
2279
a6fe35c0
AE
2280 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2281 goto fatal;
2282
2283 req = ha->req_q_map[que];
2284
9ba56b95
GM
2285 if (pkt->entry_status & RF_BUSY)
2286 res = DID_BUS_BUSY << 16;
1da177e4 2287
c4631191 2288 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
a6fe35c0 2289 if (sp) {
9ba56b95 2290 sp->done(ha, sp, res);
a6fe35c0 2291 return;
1da177e4 2292 }
a6fe35c0
AE
2293fatal:
2294 ql_log(ql_log_warn, vha, 0x5030,
2295 "Error entry - invalid handle/queue.\n");
2296
2297 if (IS_QLA82XX(ha))
2298 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2299 else
2300 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2301 qla2xxx_wake_dpc(vha);
1da177e4
LT
2302}
2303
9a853f71
AV
2304/**
2305 * qla24xx_mbx_completion() - Process mailbox command completions.
2306 * @vha: SCSI driver HA context
2307 * @mb0: Mailbox0 register
2308 */
2309static void
e315cd28 2310qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
2311{
2312 uint16_t cnt;
4fa94f83 2313 uint32_t mboxes;
9a853f71 2314 uint16_t __iomem *wptr;
e315cd28 2315 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
2316 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2317
4fa94f83
AV
2318 /* Read all mbox registers? */
2319 mboxes = (1 << ha->mbx_count) - 1;
2320 if (!ha->mcp)
2321 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2322 else
2323 mboxes = ha->mcp->in_mb;
2324
9a853f71
AV
2325 /* Load return mailbox registers. */
2326 ha->flags.mbox_int = 1;
2327 ha->mailbox_out[0] = mb0;
4fa94f83 2328 mboxes >>= 1;
9a853f71
AV
2329 wptr = (uint16_t __iomem *)&reg->mailbox1;
2330
2331 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
4fa94f83
AV
2332 if (mboxes & BIT_0)
2333 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2334
2335 mboxes >>= 1;
9a853f71
AV
2336 wptr++;
2337 }
9a853f71
AV
2338}
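/*
 * Editor's sketch (not driver code): the bitmask walk above in isolation.
 * Bit N of 'mboxes' (taken from ha->mcp->in_mb when a command is pending)
 * says whether mailbox register N was requested; only those registers are
 * latched, with mailbox 0 always supplied from the host-status word.  The
 * ex_* name, the array bound and the reg_read callback are hypothetical.
 */
static inline void ex_latch_mailboxes(uint16_t out[], uint32_t count,
    uint32_t mboxes, uint16_t mb0, uint16_t (*reg_read)(uint32_t))
{
	uint32_t cnt;

	out[0] = mb0;
	mboxes >>= 1;
	for (cnt = 1; cnt < count; cnt++) {
		if (mboxes & 1)
			out[cnt] = reg_read(cnt);
		mboxes >>= 1;
	}
}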
2339
2340/**
2341 * qla24xx_process_response_queue() - Process response queue entries.
2342 * @vha: SCSI driver HA context
2343 */
2afa19a9
AC
2344void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2345 struct rsp_que *rsp)
9a853f71 2346{
9a853f71 2347 struct sts_entry_24xx *pkt;
a9083016 2348 struct qla_hw_data *ha = vha->hw;
9a853f71 2349
e315cd28 2350 if (!vha->flags.online)
9a853f71
AV
2351 return;
2352
e315cd28
AC
2353 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2354 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2355
e315cd28
AC
2356 rsp->ring_index++;
2357 if (rsp->ring_index == rsp->length) {
2358 rsp->ring_index = 0;
2359 rsp->ring_ptr = rsp->ring;
9a853f71 2360 } else {
e315cd28 2361 rsp->ring_ptr++;
9a853f71
AV
2362 }
2363
2364 if (pkt->entry_status != 0) {
73208dfd 2365 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2d70c103
NB
2366
2367 (void)qlt_24xx_process_response_error(vha, pkt);
2368
9a853f71
AV
2369 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2370 wmb();
2371 continue;
2372 }
2373
2374 switch (pkt->entry_type) {
2375 case STATUS_TYPE:
73208dfd 2376 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2377 break;
2378 case STATUS_CONT_TYPE:
2afa19a9 2379 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2380 break;
2c3dfe3f 2381 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2382 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2383 (struct vp_rpt_id_entry_24xx *)pkt);
2384 break;
ac280b67
AV
2385 case LOGINOUT_PORT_IOCB_TYPE:
2386 qla24xx_logio_entry(vha, rsp->req,
2387 (struct logio_entry_24xx *)pkt);
2388 break;
3822263e
MI
2389 case TSK_MGMT_IOCB_TYPE:
2390 qla24xx_tm_iocb_entry(vha, rsp->req,
2391 (struct tsk_mgmt_entry *)pkt);
2392 break;
9a069e19
GM
2393 case CT_IOCB_TYPE:
2394 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
9a069e19
GM
2395 break;
2396 case ELS_IOCB_TYPE:
2397 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2398 break;
2d70c103
NB
2399 case ABTS_RECV_24XX:
2400 /* ensure that the ATIO queue is empty */
2401 qlt_24xx_process_atio_queue(vha);
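/* Intentional fall through to the common target-mode response handling. */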
2402 case ABTS_RESP_24XX:
2403 case CTIO_TYPE7:
2404 case NOTIFY_ACK_TYPE:
2405 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2406 break;
54883291
SK
2407 case MARKER_TYPE:
2408 /* Do nothing in this case; this check only prevents it
2409 * from falling into the default case.
2410 */
2411 break;
9a853f71
AV
2412 default:
2413 /* Type Not Supported. */
7c3df132
SK
2414 ql_dbg(ql_dbg_async, vha, 0x5042,
2415 "Received unknown response pkt type %x "
9a853f71 2416 "entry status=%x.\n",
7c3df132 2417 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2418 break;
2419 }
2420 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2421 wmb();
2422 }
2423
2424 /* Adjust ring index */
a9083016
GM
2425 if (IS_QLA82XX(ha)) {
2426 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2427 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2428 } else
2429 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2430}
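/*
 * Editor's sketch (not driver code): the response-ring walk used above.  The
 * consumer index advances entry by entry, wrapping to the start of the ring
 * at rsp->length, and is written back to the chip's out-pointer only after
 * the batch has been processed.  The ex_* name is hypothetical.
 */
static inline uint32_t ex_ring_advance(uint32_t ring_index, uint32_t length)
{
	return (ring_index + 1 == length) ? 0 : ring_index + 1;
}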
2431
05236a05 2432static void
e315cd28 2433qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2434{
2435 int rval;
2436 uint32_t cnt;
e315cd28 2437 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2438 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2439
6246b8a1 2440 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
05236a05
AV
2441 return;
2442
2443 rval = QLA_SUCCESS;
2444 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2445 RD_REG_DWORD(&reg->iobase_addr);
2446 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2447 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2448 rval == QLA_SUCCESS; cnt--) {
2449 if (cnt) {
2450 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2451 udelay(10);
2452 } else
2453 rval = QLA_FUNCTION_TIMEOUT;
2454 }
2455 if (rval == QLA_SUCCESS)
2456 goto next_test;
2457
2458 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2459 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2460 rval == QLA_SUCCESS; cnt--) {
2461 if (cnt) {
2462 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2463 udelay(10);
2464 } else
2465 rval = QLA_FUNCTION_TIMEOUT;
2466 }
2467 if (rval != QLA_SUCCESS)
2468 goto done;
2469
2470next_test:
2471 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2472 ql_log(ql_log_info, vha, 0x504c,
2473 "Additional code -- 0x55AA.\n");
05236a05
AV
2474
2475done:
2476 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2477 RD_REG_DWORD(&reg->iobase_window);
2478}
2479
9a853f71 2480/**
6246b8a1 2481 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
9a853f71
AV
2482 * @irq:
2483 * @dev_id: SCSI driver HA context
9a853f71
AV
2484 *
2485 * Called by system whenever the host adapter generates an interrupt.
2486 *
2487 * Returns handled flag.
2488 */
2489irqreturn_t
7d12e780 2490qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2491{
e315cd28
AC
2492 scsi_qla_host_t *vha;
2493 struct qla_hw_data *ha;
9a853f71
AV
2494 struct device_reg_24xx __iomem *reg;
2495 int status;
9a853f71
AV
2496 unsigned long iter;
2497 uint32_t stat;
2498 uint32_t hccr;
7d613ac6 2499 uint16_t mb[8];
e315cd28 2500 struct rsp_que *rsp;
43fac4d9 2501 unsigned long flags;
9a853f71 2502
e315cd28
AC
2503 rsp = (struct rsp_que *) dev_id;
2504 if (!rsp) {
3256b435
CD
2505 ql_log(ql_log_info, NULL, 0x5059,
2506 "%s: NULL response queue pointer.\n", __func__);
9a853f71
AV
2507 return IRQ_NONE;
2508 }
2509
e315cd28 2510 ha = rsp->hw;
9a853f71
AV
2511 reg = &ha->iobase->isp24;
2512 status = 0;
2513
85880801
AV
2514 if (unlikely(pci_channel_offline(ha->pdev)))
2515 return IRQ_HANDLED;
2516
43fac4d9 2517 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2518 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2519 for (iter = 50; iter--; ) {
2520 stat = RD_REG_DWORD(&reg->host_status);
2521 if (stat & HSRX_RISC_PAUSED) {
85880801 2522 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2523 break;
2524
9a853f71
AV
2525 hccr = RD_REG_DWORD(&reg->hccr);
2526
7c3df132
SK
2527 ql_log(ql_log_warn, vha, 0x504b,
2528 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2529 hccr);
05236a05 2530
e315cd28 2531 qla2xxx_check_risc_status(vha);
05236a05 2532
e315cd28
AC
2533 ha->isp_ops->fw_dump(vha, 1);
2534 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2535 break;
2536 } else if ((stat & HSRX_RISC_INT) == 0)
2537 break;
2538
2539 switch (stat & 0xff) {
fafbda9f
AE
2540 case INTR_ROM_MB_SUCCESS:
2541 case INTR_ROM_MB_FAILED:
2542 case INTR_MB_SUCCESS:
2543 case INTR_MB_FAILED:
e315cd28 2544 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2545 status |= MBX_INTERRUPT;
2546
2547 break;
fafbda9f 2548 case INTR_ASYNC_EVENT:
9a853f71
AV
2549 mb[0] = MSW(stat);
2550 mb[1] = RD_REG_WORD(&reg->mailbox1);
2551 mb[2] = RD_REG_WORD(&reg->mailbox2);
2552 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2553 qla2x00_async_event(vha, rsp, mb);
9a853f71 2554 break;
fafbda9f
AE
2555 case INTR_RSP_QUE_UPDATE:
2556 case INTR_RSP_QUE_UPDATE_83XX:
2afa19a9 2557 qla24xx_process_response_queue(vha, rsp);
9a853f71 2558 break;
fafbda9f 2559 case INTR_ATIO_QUE_UPDATE:
2d70c103
NB
2560 qlt_24xx_process_atio_queue(vha);
2561 break;
fafbda9f 2562 case INTR_ATIO_RSP_QUE_UPDATE:
2d70c103
NB
2563 qlt_24xx_process_atio_queue(vha);
2564 qla24xx_process_response_queue(vha, rsp);
2565 break;
9a853f71 2566 default:
7c3df132
SK
2567 ql_dbg(ql_dbg_async, vha, 0x504f,
2568 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2569 break;
2570 }
2571 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2572 RD_REG_DWORD_RELAXED(&reg->hccr);
cb860bbd
GM
2573 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2574 ndelay(3500);
9a853f71 2575 }
43fac4d9 2576 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71
AV
2577
2578 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2579 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 2580 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2581 complete(&ha->mbx_intr_comp);
9a853f71
AV
2582 }
2583
2584 return IRQ_HANDLED;
2585}
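/*
 * Editor's sketch (not driver code): how the 32-bit host status read in the
 * handlers above is split.  The low byte is the interrupt code that drives
 * the switch (mailbox completion, async event, response-queue or ATIO-queue
 * update); for mailbox and async interrupts the high word doubles as mailbox
 * register 0.  The ex_* names are hypothetical.
 */
static inline uint8_t ex_host_status_code(uint32_t stat)
{
	return stat & 0xff;
}

static inline uint16_t ex_host_status_mb0(uint32_t stat)
{
	return (stat >> 16) & 0xffff;
}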
2586
a8488abe
AV
2587static irqreturn_t
2588qla24xx_msix_rsp_q(int irq, void *dev_id)
2589{
e315cd28
AC
2590 struct qla_hw_data *ha;
2591 struct rsp_que *rsp;
a8488abe 2592 struct device_reg_24xx __iomem *reg;
2afa19a9 2593 struct scsi_qla_host *vha;
0f19bc68 2594 unsigned long flags;
a8488abe 2595
e315cd28
AC
2596 rsp = (struct rsp_que *) dev_id;
2597 if (!rsp) {
3256b435
CD
2598 ql_log(ql_log_info, NULL, 0x505a,
2599 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2600 return IRQ_NONE;
2601 }
2602 ha = rsp->hw;
a8488abe
AV
2603 reg = &ha->iobase->isp24;
2604
0f19bc68 2605 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2606
a67093d4 2607 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2608 qla24xx_process_response_queue(vha, rsp);
3155754a 2609 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2610 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2611 RD_REG_DWORD_RELAXED(&reg->hccr);
2612 }
0f19bc68 2613 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2614
2615 return IRQ_HANDLED;
2616}
2617
68ca949c
AC
2618static irqreturn_t
2619qla25xx_msix_rsp_q(int irq, void *dev_id)
2620{
2621 struct qla_hw_data *ha;
2622 struct rsp_que *rsp;
3155754a 2623 struct device_reg_24xx __iomem *reg;
0f19bc68 2624 unsigned long flags;
68ca949c
AC
2625
2626 rsp = (struct rsp_que *) dev_id;
2627 if (!rsp) {
3256b435
CD
2628 ql_log(ql_log_info, NULL, 0x505b,
2629 "%s: NULL response queue pointer.\n", __func__);
68ca949c
AC
2630 return IRQ_NONE;
2631 }
2632 ha = rsp->hw;
2633
3155754a 2634 /* Clear the interrupt, if enabled, for this response queue */
d424754c 2635 if (!ha->flags.disable_msix_handshake) {
3155754a 2636 reg = &ha->iobase->isp24;
0f19bc68 2637 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2638 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2639 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2640 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2641 }
68ca949c
AC
2642 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2643
2644 return IRQ_HANDLED;
2645}
2646
a8488abe
AV
2647static irqreturn_t
2648qla24xx_msix_default(int irq, void *dev_id)
2649{
e315cd28
AC
2650 scsi_qla_host_t *vha;
2651 struct qla_hw_data *ha;
2652 struct rsp_que *rsp;
a8488abe
AV
2653 struct device_reg_24xx __iomem *reg;
2654 int status;
a8488abe
AV
2655 uint32_t stat;
2656 uint32_t hccr;
7d613ac6 2657 uint16_t mb[8];
0f19bc68 2658 unsigned long flags;
a8488abe 2659
e315cd28
AC
2660 rsp = (struct rsp_que *) dev_id;
2661 if (!rsp) {
3256b435
CD
2662 ql_log(ql_log_info, NULL, 0x505c,
2663 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2664 return IRQ_NONE;
2665 }
2666 ha = rsp->hw;
a8488abe
AV
2667 reg = &ha->iobase->isp24;
2668 status = 0;
2669
0f19bc68 2670 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2671 vha = pci_get_drvdata(ha->pdev);
87f27015 2672 do {
a8488abe
AV
2673 stat = RD_REG_DWORD(&reg->host_status);
2674 if (stat & HSRX_RISC_PAUSED) {
85880801 2675 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2676 break;
2677
a8488abe
AV
2678 hccr = RD_REG_DWORD(&reg->hccr);
2679
7c3df132
SK
2680 ql_log(ql_log_info, vha, 0x5050,
2681 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2682 hccr);
05236a05 2683
e315cd28 2684 qla2xxx_check_risc_status(vha);
05236a05 2685
e315cd28
AC
2686 ha->isp_ops->fw_dump(vha, 1);
2687 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2688 break;
2689 } else if ((stat & HSRX_RISC_INT) == 0)
2690 break;
2691
2692 switch (stat & 0xff) {
fafbda9f
AE
2693 case INTR_ROM_MB_SUCCESS:
2694 case INTR_ROM_MB_FAILED:
2695 case INTR_MB_SUCCESS:
2696 case INTR_MB_FAILED:
e315cd28 2697 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2698 status |= MBX_INTERRUPT;
2699
2700 break;
fafbda9f 2701 case INTR_ASYNC_EVENT:
a8488abe
AV
2702 mb[0] = MSW(stat);
2703 mb[1] = RD_REG_WORD(&reg->mailbox1);
2704 mb[2] = RD_REG_WORD(&reg->mailbox2);
2705 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2706 qla2x00_async_event(vha, rsp, mb);
a8488abe 2707 break;
fafbda9f
AE
2708 case INTR_RSP_QUE_UPDATE:
2709 case INTR_RSP_QUE_UPDATE_83XX:
2afa19a9 2710 qla24xx_process_response_queue(vha, rsp);
a8488abe 2711 break;
fafbda9f 2712 case INTR_ATIO_QUE_UPDATE:
2d70c103
NB
2713 qlt_24xx_process_atio_queue(vha);
2714 break;
fafbda9f 2715 case INTR_ATIO_RSP_QUE_UPDATE:
2d70c103
NB
2716 qlt_24xx_process_atio_queue(vha);
2717 qla24xx_process_response_queue(vha, rsp);
2718 break;
a8488abe 2719 default:
7c3df132
SK
2720 ql_dbg(ql_dbg_async, vha, 0x5051,
2721 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2722 break;
2723 }
2724 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2725 } while (0);
0f19bc68 2726 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2727
2728 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2729 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 2730 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2731 complete(&ha->mbx_intr_comp);
a8488abe 2732 }
a8488abe
AV
2733 return IRQ_HANDLED;
2734}
2735
2736/* Interrupt handling helpers. */
2737
2738struct qla_init_msix_entry {
a8488abe 2739 const char *name;
476834c2 2740 irq_handler_t handler;
a8488abe
AV
2741};
2742
68ca949c 2743static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2744 { "qla2xxx (default)", qla24xx_msix_default },
2745 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2746 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2747};
2748
a9083016
GM
2749static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2750 { "qla2xxx (default)", qla82xx_msix_default },
2751 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2752};
2753
a8488abe 2754static void
e315cd28 2755qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2756{
2757 int i;
2758 struct qla_msix_entry *qentry;
7c3df132 2759 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2760
73208dfd
AC
2761 for (i = 0; i < ha->msix_count; i++) {
2762 qentry = &ha->msix_entries[i];
a8488abe 2763 if (qentry->have_irq)
73208dfd 2764 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2765 }
2766 pci_disable_msix(ha->pdev);
73208dfd
AC
2767 kfree(ha->msix_entries);
2768 ha->msix_entries = NULL;
2769 ha->flags.msix_enabled = 0;
7c3df132
SK
2770 ql_dbg(ql_dbg_init, vha, 0x0042,
2771 "Disabled the MSI.\n");
a8488abe
AV
2772}
2773
2774static int
73208dfd 2775qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2776{
ad038fa8 2777#define MIN_MSIX_COUNT 2
a8488abe 2778 int i, ret;
73208dfd 2779 struct msix_entry *entries;
a8488abe 2780 struct qla_msix_entry *qentry;
7c3df132 2781 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2782
2783 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2784 GFP_KERNEL);
7c3df132
SK
2785 if (!entries) {
2786 ql_log(ql_log_warn, vha, 0x00bc,
2787 "Failed to allocate memory for msix_entry.\n");
73208dfd 2788 return -ENOMEM;
7c3df132 2789 }
a8488abe 2790
73208dfd
AC
2791 for (i = 0; i < ha->msix_count; i++)
2792 entries[i].entry = i;
a8488abe 2793
73208dfd 2794 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2795 if (ret) {
ad038fa8
LC
2796 if (ret < MIN_MSIX_COUNT)
2797 goto msix_failed;
2798
7c3df132
SK
2799 ql_log(ql_log_warn, vha, 0x00c6,
2800 "MSI-X: Failed to enable support "
2801 "-- %d/%d\n Retry with %d vectors.\n",
2802 ha->msix_count, ret, ret);
73208dfd
AC
2803 ha->msix_count = ret;
2804 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2805 if (ret) {
ad038fa8 2806msix_failed:
7c3df132
SK
2807 ql_log(ql_log_fatal, vha, 0x00c7,
2808 "MSI-X: Failed to enable support, "
2809 "giving up -- %d/%d.\n",
2810 ha->msix_count, ret);
73208dfd
AC
2811 goto msix_out;
2812 }
2afa19a9 2813 ha->max_rsp_queues = ha->msix_count - 1;
73208dfd
AC
2814 }
2815 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2816 ha->msix_count, GFP_KERNEL);
2817 if (!ha->msix_entries) {
7c3df132
SK
2818 ql_log(ql_log_fatal, vha, 0x00c8,
2819 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2820 ret = -ENOMEM;
a8488abe
AV
2821 goto msix_out;
2822 }
2823 ha->flags.msix_enabled = 1;
2824
73208dfd
AC
2825 for (i = 0; i < ha->msix_count; i++) {
2826 qentry = &ha->msix_entries[i];
2827 qentry->vector = entries[i].vector;
2828 qentry->entry = entries[i].entry;
a8488abe 2829 qentry->have_irq = 0;
73208dfd 2830 qentry->rsp = NULL;
a8488abe
AV
2831 }
2832
2afa19a9
AC
2833 /* Enable MSI-X vectors for the base queue */
2834 for (i = 0; i < 2; i++) {
2835 qentry = &ha->msix_entries[i];
a9083016
GM
2836 if (IS_QLA82XX(ha)) {
2837 ret = request_irq(qentry->vector,
2838 qla82xx_msix_entries[i].handler,
2839 0, qla82xx_msix_entries[i].name, rsp);
2840 } else {
2841 ret = request_irq(qentry->vector,
2842 msix_entries[i].handler,
2843 0, msix_entries[i].name, rsp);
2844 }
2afa19a9 2845 if (ret) {
7c3df132
SK
2846 ql_log(ql_log_fatal, vha, 0x00cb,
2847 "MSI-X: unable to register handler -- %x/%d.\n",
2848 qentry->vector, ret);
2afa19a9
AC
2849 qla24xx_disable_msix(ha);
2850 ha->mqenable = 0;
2851 goto msix_out;
2852 }
2853 qentry->have_irq = 1;
2854 qentry->rsp = rsp;
2855 rsp->msix = qentry;
73208dfd 2856 }
73208dfd
AC
2857
2858 /* Enable MSI-X vector for response queue update for queue 0 */
6246b8a1
GM
2859 if (IS_QLA83XX(ha)) {
2860 if (ha->msixbase && ha->mqiobase &&
2861 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2862 ha->mqenable = 1;
2863 } else if (ha->mqiobase &&
2864 (ha->max_rsp_queues > 1 ||
2865 ha->max_req_queues > 1))
2866 ha->mqenable = 1;
7c3df132
SK
2867 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2868 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2869 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2870 ql_dbg(ql_dbg_init, vha, 0x0055,
2871 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2872 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2873
a8488abe 2874msix_out:
73208dfd 2875 kfree(entries);
a8488abe
AV
2876 return ret;
2877}
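/*
 * Editor's sketch (not driver code): the retry pattern used above with the
 * old pci_enable_msix() contract, where a positive return value means "only
 * this many vectors are available" without allocating any.  The driver
 * retries once with that smaller count as long as it still covers the two
 * base vectors (MIN_MSIX_COUNT).  ex_enable() stands in for pci_enable_msix()
 * and is hypothetical.
 */
static inline int ex_enable_msix_with_retry(int (*ex_enable)(int nvec),
    int *nvec, int min_vec)
{
	int ret = ex_enable(*nvec);

	if (ret > 0 && ret >= min_vec) {
		*nvec = ret;		/* shrink to what the platform offers */
		ret = ex_enable(*nvec);
	}
	return ret;			/* 0 on success, anything else fails */
}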
2878
2879int
73208dfd 2880qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2881{
2882 int ret;
963b0fdd 2883 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2884 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2885
2886 /* If possible, enable MSI-X. */
6246b8a1
GM
2887 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2888 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
6377a7ae
BH
2889 goto skip_msi;
2890
2891 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2892 (ha->pdev->subsystem_device == 0x7040 ||
2893 ha->pdev->subsystem_device == 0x7041 ||
2894 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2895 ql_log(ql_log_warn, vha, 0x0034,
2896 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2897 ha->pdev->subsystem_vendor,
7c3df132 2898 ha->pdev->subsystem_device);
6377a7ae
BH
2899 goto skip_msi;
2900 }
a8488abe 2901
42cd4f5d 2902 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
7c3df132
SK
2903 ql_log(ql_log_warn, vha, 0x0035,
2904 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
42cd4f5d 2905 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
a8488abe
AV
2906 goto skip_msix;
2907 }
2908
73208dfd 2909 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2910 if (!ret) {
7c3df132
SK
2911 ql_dbg(ql_dbg_init, vha, 0x0036,
2912 "MSI-X: Enabled (0x%X, 0x%X).\n",
2913 ha->chip_revision, ha->fw_attributes);
963b0fdd 2914 goto clear_risc_ints;
a8488abe 2915 }
7c3df132
SK
2916 ql_log(ql_log_info, vha, 0x0037,
2917 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2918skip_msix:
cbedb601 2919
3a03eb79 2920 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
487370af 2921 !IS_QLA8001(ha) && !IS_QLA82XX(ha))
cbedb601
AV
2922 goto skip_msi;
2923
2924 ret = pci_enable_msi(ha->pdev);
2925 if (!ret) {
7c3df132
SK
2926 ql_dbg(ql_dbg_init, vha, 0x0038,
2927 "MSI: Enabled.\n");
cbedb601 2928 ha->flags.msi_enabled = 1;
a9083016 2929 } else
7c3df132
SK
2930 ql_log(ql_log_warn, vha, 0x0039,
2931 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
a033b655
GM
2932
2933 /* Skip INTx on ISP82xx. */
2934 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2935 return QLA_FUNCTION_FAILED;
2936
cbedb601
AV
2937skip_msi:
2938
fd34f556 2939 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2940 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2941 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2942 if (ret) {
7c3df132 2943 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2944 "Failed to reserve interrupt %d already in use.\n",
2945 ha->pdev->irq);
963b0fdd
AV
2946 goto fail;
2947 }
7992abfc 2948
963b0fdd
AV
2949clear_risc_ints:
2950
c6952483 2951 spin_lock_irq(&ha->hardware_lock);
c1114953 2952 if (!IS_FWI2_CAPABLE(ha))
963b0fdd 2953 WRT_REG_WORD(&reg->isp.semaphore, 0);
c6952483 2954 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2955
963b0fdd 2956fail:
a8488abe
AV
2957 return ret;
2958}
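/*
 * Editor's sketch (not driver code): the fallback order implemented above.
 * MSI-X is attempted only on ISPs that support it (the HP SSVID/SSDID parts
 * listed above skip straight to INTx), then plain MSI, and finally legacy
 * INTx via request_irq() with IRQF_SHARED; ISP82xx is the exception that is
 * not allowed to fall back to INTx.  The ex_* names are hypothetical.
 */
enum ex_irq_mode { EX_IRQ_MSIX, EX_IRQ_MSI, EX_IRQ_INTX, EX_IRQ_FAIL };

static inline enum ex_irq_mode ex_pick_irq_mode(int msix_ok, int msi_ok,
    int is_82xx)
{
	if (msix_ok)
		return EX_IRQ_MSIX;
	if (msi_ok)
		return EX_IRQ_MSI;
	return is_82xx ? EX_IRQ_FAIL : EX_IRQ_INTX;
}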
2959
2960void
e315cd28 2961qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2962{
e315cd28 2963 struct qla_hw_data *ha = vha->hw;
9a347ff4
CD
2964 struct rsp_que *rsp;
2965
2966 /*
2967 * We need to check that ha->rsp_q_map is valid in case we are called
2968 * from a probe failure context.
2969 */
2970 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2971 return;
2972 rsp = ha->rsp_q_map[0];
a8488abe
AV
2973
2974 if (ha->flags.msix_enabled)
2975 qla24xx_disable_msix(ha);
90a86fc0 2976 else if (ha->flags.msi_enabled) {
e315cd28 2977 free_irq(ha->pdev->irq, rsp);
cbedb601 2978 pci_disable_msi(ha->pdev);
90a86fc0
JC
2979 } else
2980 free_irq(ha->pdev->irq, rsp);
a8488abe 2981}
e315cd28 2982
73208dfd
AC
2983
2984int qla25xx_request_irq(struct rsp_que *rsp)
2985{
2986 struct qla_hw_data *ha = rsp->hw;
2afa19a9 2987 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 2988 struct qla_msix_entry *msix = rsp->msix;
7c3df132 2989 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2990 int ret;
2991
2992 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2993 if (ret) {
7c3df132
SK
2994 ql_log(ql_log_fatal, vha, 0x00e6,
2995 "MSI-X: Unable to register handler -- %x/%d.\n",
2996 msix->vector, ret);
73208dfd
AC
2997 return ret;
2998 }
2999 msix->have_irq = 1;
3000 msix->rsp = rsp;
3001 return ret;
3002}