[SCSI] qla2xxx: Dont call nic restart firmware if it is already active and running.
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_isr.c
CommitLineData
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
46152ceb 3 * Copyright (c) 2003-2012 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
2d70c103 8#include "qla_target.h"
1da177e4 9
05236a05 10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
df7baa50 12#include <scsi/scsi_tcq.h>
9a069e19 13#include <scsi/scsi_bsg_fc.h>
bad75002 14#include <scsi/scsi_eh.h>
df7baa50 15
1da177e4 16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
73208dfd
AC
17static void qla2x00_process_completed_request(struct scsi_qla_host *,
18 struct req_que *, uint32_t);
19static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
2afa19a9 20static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
73208dfd
AC
21static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
22 sts_entry_t *);
9a853f71 23
1da177e4
LT
24/**
25 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
26 * @irq:
27 * @dev_id: SCSI driver HA context
1da177e4
LT
28 *
29 * Called by system whenever the host adapter generates an interrupt.
30 *
31 * Returns handled flag.
32 */
33irqreturn_t
7d12e780 34qla2100_intr_handler(int irq, void *dev_id)
1da177e4 35{
e315cd28
AC
36 scsi_qla_host_t *vha;
37 struct qla_hw_data *ha;
3d71644c 38 struct device_reg_2xxx __iomem *reg;
1da177e4 39 int status;
1da177e4 40 unsigned long iter;
14e660e6 41 uint16_t hccr;
9a853f71 42 uint16_t mb[4];
e315cd28 43 struct rsp_que *rsp;
43fac4d9 44 unsigned long flags;
1da177e4 45
e315cd28
AC
46 rsp = (struct rsp_que *) dev_id;
47 if (!rsp) {
3256b435
CD
48 ql_log(ql_log_info, NULL, 0x505d,
49 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
50 return (IRQ_NONE);
51 }
52
e315cd28 53 ha = rsp->hw;
3d71644c 54 reg = &ha->iobase->isp;
1da177e4
LT
55 status = 0;
56
43fac4d9 57 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 58 vha = pci_get_drvdata(ha->pdev);
1da177e4 59 for (iter = 50; iter--; ) {
14e660e6
SJ
60 hccr = RD_REG_WORD(&reg->hccr);
61 if (hccr & HCCR_RISC_PAUSE) {
62 if (pci_channel_offline(ha->pdev))
63 break;
64
65 /*
66 * Issue a "HARD" reset in order for the RISC interrupt
a06a0f8e 67 * bit to be cleared. Schedule a big hammer to get
14e660e6
SJ
68 * out of the RISC PAUSED state.
69 */
70 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
71 RD_REG_WORD(&reg->hccr);
72
e315cd28
AC
73 ha->isp_ops->fw_dump(vha, 1);
74 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
75 break;
76 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
77 break;
78
79 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
80 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
81 RD_REG_WORD(&reg->hccr);
82
83 /* Get mailbox data. */
9a853f71
AV
84 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
85 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 86 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 87 status |= MBX_INTERRUPT;
9a853f71
AV
88 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
89 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
90 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
91 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 92 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
93 } else {
94 /*EMPTY*/
7c3df132
SK
95 ql_dbg(ql_dbg_async, vha, 0x5025,
96 "Unrecognized interrupt type (%d).\n",
97 mb[0]);
1da177e4
LT
98 }
99 /* Release mailbox registers. */
100 WRT_REG_WORD(&reg->semaphore, 0);
101 RD_REG_WORD(&reg->semaphore);
102 } else {
73208dfd 103 qla2x00_process_response_queue(rsp);
1da177e4
LT
104
105 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
106 RD_REG_WORD(&reg->hccr);
107 }
108 }
43fac4d9 109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 110
1da177e4
LT
111 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
112 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 113 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 114 complete(&ha->mbx_intr_comp);
1da177e4
LT
115 }
116
1da177e4
LT
117 return (IRQ_HANDLED);
118}
119
120/**
121 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
122 * @irq:
123 * @dev_id: SCSI driver HA context
1da177e4
LT
124 *
125 * Called by system whenever the host adapter generates an interrupt.
126 *
127 * Returns handled flag.
128 */
129irqreturn_t
7d12e780 130qla2300_intr_handler(int irq, void *dev_id)
1da177e4 131{
e315cd28 132 scsi_qla_host_t *vha;
3d71644c 133 struct device_reg_2xxx __iomem *reg;
1da177e4 134 int status;
1da177e4
LT
135 unsigned long iter;
136 uint32_t stat;
1da177e4 137 uint16_t hccr;
9a853f71 138 uint16_t mb[4];
e315cd28
AC
139 struct rsp_que *rsp;
140 struct qla_hw_data *ha;
43fac4d9 141 unsigned long flags;
1da177e4 142
e315cd28
AC
143 rsp = (struct rsp_que *) dev_id;
144 if (!rsp) {
3256b435
CD
145 ql_log(ql_log_info, NULL, 0x5058,
146 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
147 return (IRQ_NONE);
148 }
149
e315cd28 150 ha = rsp->hw;
3d71644c 151 reg = &ha->iobase->isp;
1da177e4
LT
152 status = 0;
153
43fac4d9 154 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 155 vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
156 for (iter = 50; iter--; ) {
157 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
158 if (stat & HSR_RISC_PAUSED) {
85880801 159 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
160 break;
161
1da177e4
LT
162 hccr = RD_REG_WORD(&reg->hccr);
163 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
7c3df132
SK
164 ql_log(ql_log_warn, vha, 0x5026,
165 "Parity error -- HCCR=%x, Dumping "
166 "firmware.\n", hccr);
1da177e4 167 else
7c3df132
SK
168 ql_log(ql_log_warn, vha, 0x5027,
169 "RISC paused -- HCCR=%x, Dumping "
170 "firmware.\n", hccr);
1da177e4
LT
171
172 /*
173 * Issue a "HARD" reset in order for the RISC
174 * interrupt bit to be cleared. Schedule a big
a06a0f8e 175 * hammer to get out of the RISC PAUSED state.
1da177e4
LT
176 */
177 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
178 RD_REG_WORD(&reg->hccr);
07f31805 179
e315cd28
AC
180 ha->isp_ops->fw_dump(vha, 1);
181 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
182 break;
183 } else if ((stat & HSR_RISC_INT) == 0)
184 break;
185
1da177e4 186 switch (stat & 0xff) {
1da177e4
LT
187 case 0x1:
188 case 0x2:
189 case 0x10:
190 case 0x11:
e315cd28 191 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
192 status |= MBX_INTERRUPT;
193
194 /* Release mailbox registers. */
195 WRT_REG_WORD(&reg->semaphore, 0);
196 break;
197 case 0x12:
9a853f71
AV
198 mb[0] = MSW(stat);
199 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
200 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
201 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 202 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
203 break;
204 case 0x13:
73208dfd 205 qla2x00_process_response_queue(rsp);
1da177e4
LT
206 break;
207 case 0x15:
9a853f71
AV
208 mb[0] = MBA_CMPLT_1_16BIT;
209 mb[1] = MSW(stat);
73208dfd 210 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
211 break;
212 case 0x16:
9a853f71
AV
213 mb[0] = MBA_SCSI_COMPLETION;
214 mb[1] = MSW(stat);
215 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 216 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
217 break;
218 default:
7c3df132
SK
219 ql_dbg(ql_dbg_async, vha, 0x5028,
220 "Unrecognized interrupt type (%d).\n", stat & 0xff);
1da177e4
LT
221 break;
222 }
223 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
224 RD_REG_WORD_RELAXED(&reg->hccr);
225 }
43fac4d9 226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 227
1da177e4
LT
228 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
229 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 230 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 231 complete(&ha->mbx_intr_comp);
1da177e4
LT
232 }
233
1da177e4
LT
234 return (IRQ_HANDLED);
235}
236
237/**
238 * qla2x00_mbx_completion() - Process mailbox command completions.
239 * @ha: SCSI driver HA context
240 * @mb0: Mailbox0 register
241 */
242static void
e315cd28 243qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
244{
245 uint16_t cnt;
4fa94f83 246 uint32_t mboxes;
1da177e4 247 uint16_t __iomem *wptr;
e315cd28 248 struct qla_hw_data *ha = vha->hw;
3d71644c 249 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 250
4fa94f83
AV
251 /* Read all mbox registers? */
252 mboxes = (1 << ha->mbx_count) - 1;
253 if (!ha->mcp)
254 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERRROR.\n");
255 else
256 mboxes = ha->mcp->in_mb;
257
1da177e4
LT
258 /* Load return mailbox registers. */
259 ha->flags.mbox_int = 1;
260 ha->mailbox_out[0] = mb0;
4fa94f83 261 mboxes >>= 1;
1da177e4
LT
262 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
263
264 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 265 if (IS_QLA2200(ha) && cnt == 8)
1da177e4 266 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
4fa94f83 267 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
1da177e4 268 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
4fa94f83 269 else if (mboxes & BIT_0)
1da177e4 270 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 271
1da177e4 272 wptr++;
4fa94f83 273 mboxes >>= 1;
1da177e4 274 }
1da177e4
LT
275}
276
8a659571
AV
277static void
278qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
279{
280 static char *event[] =
281 { "Complete", "Request Notification", "Time Extension" };
282 int rval;
283 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
284 uint16_t __iomem *wptr;
285 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
286
287 /* Seed data -- mailbox1 -> mailbox7. */
288 wptr = (uint16_t __iomem *)&reg24->mailbox1;
289 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
290 mb[cnt] = RD_REG_WORD(wptr);
291
7c3df132 292 ql_dbg(ql_dbg_async, vha, 0x5021,
6246b8a1 293 "Inter-Driver Communication %s -- "
7c3df132
SK
294 "%04x %04x %04x %04x %04x %04x %04x.\n",
295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
296 mb[4], mb[5], mb[6]);
8a659571 297
8fcd6b8b
CD
298 if (IS_QLA81XX(vha->hw)) {
299 /* Acknowledgement needed? [Notify && non-zero timeout]. */
300 timeout = (descr >> 8) & 0xf;
301 if (aen != MBA_IDC_NOTIFY || !timeout)
302 return;
303
304 ql_dbg(ql_dbg_async, vha, 0x5022,
305 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
306 vha->host_no, event[aen & 0xff], timeout);
307 }
8a659571
AV
308
309 rval = qla2x00_post_idc_ack_work(vha, mb);
310 if (rval != QLA_SUCCESS)
7c3df132 311 ql_log(ql_log_warn, vha, 0x5023,
8a659571
AV
312 "IDC failed to post ACK.\n");
313}
314
daae62a3
CD
315#define LS_UNKNOWN 2
316char *
317qla2x00_get_link_speed_str(struct qla_hw_data *ha)
318{
319 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
320 char *link_speed;
321 int fw_speed = ha->link_data_rate;
322
323 if (IS_QLA2100(ha) || IS_QLA2200(ha))
324 link_speed = link_speeds[0];
325 else if (fw_speed == 0x13)
326 link_speed = link_speeds[6];
327 else {
328 link_speed = link_speeds[LS_UNKNOWN];
329 if (fw_speed < 6)
330 link_speed =
331 link_speeds[fw_speed];
332 }
333
334 return link_speed;
335}
336
7d613ac6
SV
337void
338qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
339{
340 struct qla_hw_data *ha = vha->hw;
341
342 /*
343 * 8200 AEN Interpretation:
344 * mb[0] = AEN code
345 * mb[1] = AEN Reason code
346 * mb[2] = LSW of Peg-Halt Status-1 Register
347 * mb[6] = MSW of Peg-Halt Status-1 Register
348 * mb[3] = LSW of Peg-Halt Status-2 register
349 * mb[7] = MSW of Peg-Halt Status-2 register
350 * mb[4] = IDC Device-State Register value
351 * mb[5] = IDC Driver-Presence Register value
352 */
353 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
354 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
355 mb[0], mb[1], mb[2], mb[6]);
356 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
357 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
358 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
359
360 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
361 IDC_HEARTBEAT_FAILURE)) {
362 ha->flags.nic_core_hung = 1;
363 ql_log(ql_log_warn, vha, 0x5060,
364 "83XX: F/W Error Reported: Check if reset required.\n");
365
366 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
367 uint32_t protocol_engine_id, fw_err_code, err_level;
368
369 /*
370 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
371 * - PEG-Halt Status-1 Register:
372 * (LSW = mb[2], MSW = mb[6])
373 * Bits 0-7 = protocol-engine ID
374 * Bits 8-28 = f/w error code
375 * Bits 29-31 = Error-level
376 * Error-level 0x1 = Non-Fatal error
377 * Error-level 0x2 = Recoverable Fatal error
378 * Error-level 0x4 = UnRecoverable Fatal error
379 * - PEG-Halt Status-2 Register:
380 * (LSW = mb[3], MSW = mb[7])
381 */
382 protocol_engine_id = (mb[2] & 0xff);
383 fw_err_code = (((mb[2] & 0xff00) >> 8) |
384 ((mb[6] & 0x1fff) << 8));
385 err_level = ((mb[6] & 0xe000) >> 13);
386 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
387 "Register: protocol_engine_id=0x%x "
388 "fw_err_code=0x%x err_level=0x%x.\n",
389 protocol_engine_id, fw_err_code, err_level);
390 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
391 "Register: 0x%x%x.\n", mb[7], mb[3]);
392 if (err_level == ERR_LEVEL_NON_FATAL) {
393 ql_log(ql_log_warn, vha, 0x5063,
394 "Not a fatal error, f/w has recovered "
395 "iteself.\n");
396 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
397 ql_log(ql_log_fatal, vha, 0x5064,
398 "Recoverable Fatal error: Chip reset "
399 "required.\n");
400 qla83xx_schedule_work(vha,
401 QLA83XX_NIC_CORE_RESET);
402 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
403 ql_log(ql_log_fatal, vha, 0x5065,
404 "Unrecoverable Fatal error: Set FAILED "
405 "state, reboot required.\n");
406 qla83xx_schedule_work(vha,
407 QLA83XX_NIC_CORE_UNRECOVERABLE);
408 }
409 }
410
411 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
412 uint16_t peg_fw_state, nw_interface_link_up;
413 uint16_t nw_interface_signal_detect, sfp_status;
414 uint16_t htbt_counter, htbt_monitor_enable;
415 uint16_t sfp_additonal_info, sfp_multirate;
416 uint16_t sfp_tx_fault, link_speed, dcbx_status;
417
418 /*
419 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
420 * - PEG-to-FC Status Register:
421 * (LSW = mb[2], MSW = mb[6])
422 * Bits 0-7 = Peg-Firmware state
423 * Bit 8 = N/W Interface Link-up
424 * Bit 9 = N/W Interface signal detected
425 * Bits 10-11 = SFP Status
426 * SFP Status 0x0 = SFP+ transceiver not expected
427 * SFP Status 0x1 = SFP+ transceiver not present
428 * SFP Status 0x2 = SFP+ transceiver invalid
429 * SFP Status 0x3 = SFP+ transceiver present and
430 * valid
431 * Bits 12-14 = Heartbeat Counter
432 * Bit 15 = Heartbeat Monitor Enable
433 * Bits 16-17 = SFP Additional Info
434 * SFP info 0x0 = Unregocnized transceiver for
435 * Ethernet
436 * SFP info 0x1 = SFP+ brand validation failed
437 * SFP info 0x2 = SFP+ speed validation failed
438 * SFP info 0x3 = SFP+ access error
439 * Bit 18 = SFP Multirate
440 * Bit 19 = SFP Tx Fault
441 * Bits 20-22 = Link Speed
442 * Bits 23-27 = Reserved
443 * Bits 28-30 = DCBX Status
444 * DCBX Status 0x0 = DCBX Disabled
445 * DCBX Status 0x1 = DCBX Enabled
446 * DCBX Status 0x2 = DCBX Exchange error
447 * Bit 31 = Reserved
448 */
449 peg_fw_state = (mb[2] & 0x00ff);
450 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
451 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
452 sfp_status = ((mb[2] & 0x0c00) >> 10);
453 htbt_counter = ((mb[2] & 0x7000) >> 12);
454 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
455 sfp_additonal_info = (mb[6] & 0x0003);
456 sfp_multirate = ((mb[6] & 0x0004) >> 2);
457 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
458 link_speed = ((mb[6] & 0x0070) >> 4);
459 dcbx_status = ((mb[6] & 0x7000) >> 12);
460
461 ql_log(ql_log_warn, vha, 0x5066,
462 "Peg-to-Fc Status Register:\n"
463 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
464 "nw_interface_signal_detect=0x%x"
465 "\nsfp_statis=0x%x.\n ", peg_fw_state,
466 nw_interface_link_up, nw_interface_signal_detect,
467 sfp_status);
468 ql_log(ql_log_warn, vha, 0x5067,
469 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
470 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
471 htbt_counter, htbt_monitor_enable,
472 sfp_additonal_info, sfp_multirate);
473 ql_log(ql_log_warn, vha, 0x5068,
474 "sfp_tx_fault=0x%x, link_state=0x%x, "
475 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
476 dcbx_status);
477
478 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
479 }
480
481 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
482 ql_log(ql_log_warn, vha, 0x5069,
483 "Heartbeat Failure encountered, chip reset "
484 "required.\n");
485
486 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
487 }
488 }
489
490 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
491 ql_log(ql_log_info, vha, 0x506a,
492 "IDC Device-State changed = 0x%x.\n", mb[4]);
493 qla83xx_schedule_work(vha, MBA_IDC_AEN);
494 }
495}
496
1da177e4
LT
497/**
498 * qla2x00_async_event() - Process aynchronous events.
499 * @ha: SCSI driver HA context
9a853f71 500 * @mb: Mailbox registers (0 - 3)
1da177e4 501 */
2c3dfe3f 502void
73208dfd 503qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 504{
1da177e4 505 uint16_t handle_cnt;
bdab23da 506 uint16_t cnt, mbx;
1da177e4 507 uint32_t handles[5];
e315cd28 508 struct qla_hw_data *ha = vha->hw;
3d71644c 509 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
bdab23da 510 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
bc5c2aad 511 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1da177e4 512 uint32_t rscn_entry, host_pid;
4d4df193 513 unsigned long flags;
1da177e4
LT
514
515 /* Setup to process RIO completion. */
516 handle_cnt = 0;
6246b8a1 517 if (IS_CNA_CAPABLE(ha))
3a03eb79 518 goto skip_rio;
1da177e4
LT
519 switch (mb[0]) {
520 case MBA_SCSI_COMPLETION:
9a853f71 521 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
522 handle_cnt = 1;
523 break;
524 case MBA_CMPLT_1_16BIT:
9a853f71 525 handles[0] = mb[1];
1da177e4
LT
526 handle_cnt = 1;
527 mb[0] = MBA_SCSI_COMPLETION;
528 break;
529 case MBA_CMPLT_2_16BIT:
9a853f71
AV
530 handles[0] = mb[1];
531 handles[1] = mb[2];
1da177e4
LT
532 handle_cnt = 2;
533 mb[0] = MBA_SCSI_COMPLETION;
534 break;
535 case MBA_CMPLT_3_16BIT:
9a853f71
AV
536 handles[0] = mb[1];
537 handles[1] = mb[2];
538 handles[2] = mb[3];
1da177e4
LT
539 handle_cnt = 3;
540 mb[0] = MBA_SCSI_COMPLETION;
541 break;
542 case MBA_CMPLT_4_16BIT:
9a853f71
AV
543 handles[0] = mb[1];
544 handles[1] = mb[2];
545 handles[2] = mb[3];
1da177e4
LT
546 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
547 handle_cnt = 4;
548 mb[0] = MBA_SCSI_COMPLETION;
549 break;
550 case MBA_CMPLT_5_16BIT:
9a853f71
AV
551 handles[0] = mb[1];
552 handles[1] = mb[2];
553 handles[2] = mb[3];
1da177e4
LT
554 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
555 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
556 handle_cnt = 5;
557 mb[0] = MBA_SCSI_COMPLETION;
558 break;
559 case MBA_CMPLT_2_32BIT:
9a853f71 560 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
561 handles[1] = le32_to_cpu(
562 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
563 RD_MAILBOX_REG(ha, reg, 6));
564 handle_cnt = 2;
565 mb[0] = MBA_SCSI_COMPLETION;
566 break;
567 default:
568 break;
569 }
3a03eb79 570skip_rio:
1da177e4
LT
571 switch (mb[0]) {
572 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 573 if (!vha->flags.online)
1da177e4
LT
574 break;
575
576 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
577 qla2x00_process_completed_request(vha, rsp->req,
578 handles[cnt]);
1da177e4
LT
579 break;
580
581 case MBA_RESET: /* Reset */
7c3df132
SK
582 ql_dbg(ql_dbg_async, vha, 0x5002,
583 "Asynchronous RESET.\n");
1da177e4 584
e315cd28 585 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
586 break;
587
588 case MBA_SYSTEM_ERR: /* System Error */
6246b8a1
GM
589 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
590 RD_REG_WORD(&reg24->mailbox7) : 0;
7c3df132 591 ql_log(ql_log_warn, vha, 0x5003,
bdab23da
AV
592 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
593 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
1da177e4 594
e315cd28 595 ha->isp_ops->fw_dump(vha, 1);
1da177e4 596
e428924c 597 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 598 if (mb[1] == 0 && mb[2] == 0) {
7c3df132 599 ql_log(ql_log_fatal, vha, 0x5004,
9a853f71
AV
600 "Unrecoverable Hardware Error: adapter "
601 "marked OFFLINE!\n");
e315cd28 602 vha->flags.online = 0;
6246b8a1 603 vha->device_flags |= DFLG_DEV_FAILED;
b1d46989 604 } else {
25985edc 605 /* Check to see if MPI timeout occurred */
b1d46989
MI
606 if ((mbx & MBX_3) && (ha->flags.port0))
607 set_bit(MPI_RESET_NEEDED,
608 &vha->dpc_flags);
609
e315cd28 610 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
b1d46989 611 }
9a853f71 612 } else if (mb[1] == 0) {
7c3df132 613 ql_log(ql_log_fatal, vha, 0x5005,
1da177e4
LT
614 "Unrecoverable Hardware Error: adapter marked "
615 "OFFLINE!\n");
e315cd28 616 vha->flags.online = 0;
6246b8a1 617 vha->device_flags |= DFLG_DEV_FAILED;
1da177e4 618 } else
e315cd28 619 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
620 break;
621
622 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7c3df132
SK
623 ql_log(ql_log_warn, vha, 0x5006,
624 "ISP Request Transfer Error (%x).\n", mb[1]);
1da177e4 625
e315cd28 626 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
627 break;
628
629 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7c3df132
SK
630 ql_log(ql_log_warn, vha, 0x5007,
631 "ISP Response Transfer Error.\n");
1da177e4 632
e315cd28 633 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
634 break;
635
636 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
7c3df132
SK
637 ql_dbg(ql_dbg_async, vha, 0x5008,
638 "Asynchronous WAKEUP_THRES.\n");
1da177e4 639
2d70c103 640 break;
1da177e4 641 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
cfb0919c 642 ql_dbg(ql_dbg_async, vha, 0x5009,
7c3df132 643 "LIP occurred (%x).\n", mb[1]);
1da177e4 644
e315cd28
AC
645 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
646 atomic_set(&vha->loop_state, LOOP_DOWN);
647 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
648 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
649 }
650
e315cd28
AC
651 if (vha->vp_idx) {
652 atomic_set(&vha->vp_state, VP_FAILED);
653 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
654 }
655
e315cd28
AC
656 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
657 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 658
e315cd28
AC
659 vha->flags.management_server_logged_in = 0;
660 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
661 break;
662
663 case MBA_LOOP_UP: /* Loop Up Event */
daae62a3 664 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d8b45213 665 ha->link_data_rate = PORT_SPEED_1GB;
daae62a3 666 else
1da177e4 667 ha->link_data_rate = mb[1];
1da177e4 668
cfb0919c 669 ql_dbg(ql_dbg_async, vha, 0x500a,
daae62a3
CD
670 "LOOP UP detected (%s Gbps).\n",
671 qla2x00_get_link_speed_str(ha));
1da177e4 672
e315cd28
AC
673 vha->flags.management_server_logged_in = 0;
674 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
675 break;
676
677 case MBA_LOOP_DOWN: /* Loop Down Event */
6246b8a1
GM
678 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
679 ? RD_REG_WORD(&reg24->mailbox4) : 0;
bc5c2aad 680 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
cfb0919c 681 ql_dbg(ql_dbg_async, vha, 0x500b,
7c3df132
SK
682 "LOOP DOWN detected (%x %x %x %x).\n",
683 mb[1], mb[2], mb[3], mbx);
1da177e4 684
e315cd28
AC
685 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
686 atomic_set(&vha->loop_state, LOOP_DOWN);
687 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
688 vha->device_flags |= DFLG_NO_CABLE;
689 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
690 }
691
e315cd28
AC
692 if (vha->vp_idx) {
693 atomic_set(&vha->vp_state, VP_FAILED);
694 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
695 }
696
e315cd28 697 vha->flags.management_server_logged_in = 0;
d8b45213 698 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 699 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
700 break;
701
702 case MBA_LIP_RESET: /* LIP reset occurred */
cfb0919c 703 ql_dbg(ql_dbg_async, vha, 0x500c,
cc3ef7bc 704 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 705
e315cd28
AC
706 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
707 atomic_set(&vha->loop_state, LOOP_DOWN);
708 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
709 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
710 }
711
e315cd28
AC
712 if (vha->vp_idx) {
713 atomic_set(&vha->vp_state, VP_FAILED);
714 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
715 }
716
e315cd28 717 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
718
719 ha->operating_mode = LOOP;
e315cd28
AC
720 vha->flags.management_server_logged_in = 0;
721 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
722 break;
723
3a03eb79 724 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
725 case MBA_POINT_TO_POINT: /* Point-to-Point */
726 if (IS_QLA2100(ha))
727 break;
728
6246b8a1 729 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
7c3df132
SK
730 ql_dbg(ql_dbg_async, vha, 0x500d,
731 "DCBX Completed -- %04x %04x %04x.\n",
732 mb[1], mb[2], mb[3]);
23f2ebd1
SR
733 if (ha->notify_dcbx_comp)
734 complete(&ha->dcbx_comp);
735
736 } else
7c3df132
SK
737 ql_dbg(ql_dbg_async, vha, 0x500e,
738 "Asynchronous P2P MODE received.\n");
1da177e4
LT
739
740 /*
741 * Until there's a transition from loop down to loop up, treat
742 * this as loop down only.
743 */
e315cd28
AC
744 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
745 atomic_set(&vha->loop_state, LOOP_DOWN);
746 if (!atomic_read(&vha->loop_down_timer))
747 atomic_set(&vha->loop_down_timer,
1da177e4 748 LOOP_DOWN_TIME);
e315cd28 749 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
750 }
751
e315cd28
AC
752 if (vha->vp_idx) {
753 atomic_set(&vha->vp_state, VP_FAILED);
754 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
755 }
756
e315cd28
AC
757 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
758 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
759
760 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
761 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
762
763 ha->flags.gpsc_supported = 1;
e315cd28 764 vha->flags.management_server_logged_in = 0;
1da177e4
LT
765 break;
766
767 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
768 if (IS_QLA2100(ha))
769 break;
770
cfb0919c 771 ql_dbg(ql_dbg_async, vha, 0x500f,
1da177e4
LT
772 "Configuration change detected: value=%x.\n", mb[1]);
773
e315cd28
AC
774 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
775 atomic_set(&vha->loop_state, LOOP_DOWN);
776 if (!atomic_read(&vha->loop_down_timer))
777 atomic_set(&vha->loop_down_timer,
1da177e4 778 LOOP_DOWN_TIME);
e315cd28 779 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
780 }
781
e315cd28
AC
782 if (vha->vp_idx) {
783 atomic_set(&vha->vp_state, VP_FAILED);
784 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
785 }
786
e315cd28
AC
787 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
788 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
789 break;
790
791 case MBA_PORT_UPDATE: /* Port database update */
55903b9d
SV
792 /*
793 * Handle only global and vn-port update events
794 *
795 * Relevant inputs:
796 * mb[1] = N_Port handle of changed port
797 * OR 0xffff for global event
798 * mb[2] = New login state
799 * 7 = Port logged out
800 * mb[3] = LSB is vp_idx, 0xff = all vps
801 *
802 * Skip processing if:
803 * Event is global, vp_idx is NOT all vps,
804 * vp_idx does not match
805 * Event is not global, vp_idx does not match
806 */
12cec63e
AV
807 if (IS_QLA2XXX_MIDTYPE(ha) &&
808 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
809 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
810 break;
73208dfd 811
9764ff88
AV
812 /* Global event -- port logout or port unavailable. */
813 if (mb[1] == 0xffff && mb[2] == 0x7) {
7c3df132
SK
814 ql_dbg(ql_dbg_async, vha, 0x5010,
815 "Port unavailable %04x %04x %04x.\n",
816 mb[1], mb[2], mb[3]);
daae62a3
CD
817 ql_log(ql_log_warn, vha, 0x505e,
818 "Link is offline.\n");
9764ff88
AV
819
820 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
821 atomic_set(&vha->loop_state, LOOP_DOWN);
822 atomic_set(&vha->loop_down_timer,
823 LOOP_DOWN_TIME);
824 vha->device_flags |= DFLG_NO_CABLE;
825 qla2x00_mark_all_devices_lost(vha, 1);
826 }
827
828 if (vha->vp_idx) {
829 atomic_set(&vha->vp_state, VP_FAILED);
830 fc_vport_set_state(vha->fc_vport,
831 FC_VPORT_FAILED);
faadc5e7 832 qla2x00_mark_all_devices_lost(vha, 1);
9764ff88
AV
833 }
834
835 vha->flags.management_server_logged_in = 0;
836 ha->link_data_rate = PORT_SPEED_UNKNOWN;
837 break;
838 }
839
1da177e4 840 /*
cc3ef7bc 841 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1da177e4
LT
842 * event etc. earlier indicating loop is down) then process
843 * it. Otherwise ignore it and Wait for RSCN to come in.
844 */
e315cd28 845 atomic_set(&vha->loop_down_timer, 0);
79cc785f 846 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
7c3df132
SK
847 ql_dbg(ql_dbg_async, vha, 0x5011,
848 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
849 mb[1], mb[2], mb[3]);
2d70c103
NB
850
851 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
852 break;
853 }
854
7c3df132
SK
855 ql_dbg(ql_dbg_async, vha, 0x5012,
856 "Port database changed %04x %04x %04x.\n",
857 mb[1], mb[2], mb[3]);
daae62a3
CD
858 ql_log(ql_log_warn, vha, 0x505f,
859 "Link is operational (%s Gbps).\n",
860 qla2x00_get_link_speed_str(ha));
1da177e4
LT
861
862 /*
863 * Mark all devices as missing so we will login again.
864 */
e315cd28 865 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 866
e315cd28 867 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 868
2d70c103
NB
869 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
870 set_bit(SCR_PENDING, &vha->dpc_flags);
871
e315cd28
AC
872 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
873 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2d70c103
NB
874
875 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
876 break;
877
878 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 879 /* Check if the Vport has issued a SCR */
e315cd28 880 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
881 break;
882 /* Only handle SCNs for our Vport index. */
0d6e61bc 883 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
3c397400 884 break;
0d6e61bc 885
7c3df132
SK
886 ql_dbg(ql_dbg_async, vha, 0x5013,
887 "RSCN database changed -- %04x %04x %04x.\n",
888 mb[1], mb[2], mb[3]);
1da177e4 889
59d72d87 890 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
891 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
892 | vha->d_id.b.al_pa;
1da177e4 893 if (rscn_entry == host_pid) {
7c3df132
SK
894 ql_dbg(ql_dbg_async, vha, 0x5014,
895 "Ignoring RSCN update to local host "
896 "port ID (%06x).\n", host_pid);
1da177e4
LT
897 break;
898 }
899
59d72d87
RA
900 /* Ignore reserved bits from RSCN-payload. */
901 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1da177e4 902
e315cd28
AC
903 atomic_set(&vha->loop_down_timer, 0);
904 vha->flags.management_server_logged_in = 0;
1da177e4 905
e315cd28
AC
906 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
907 set_bit(RSCN_UPDATE, &vha->dpc_flags);
908 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
909 break;
910
911 /* case MBA_RIO_RESPONSE: */
912 case MBA_ZIO_RESPONSE:
7c3df132
SK
913 ql_dbg(ql_dbg_async, vha, 0x5015,
914 "[R|Z]IO update completion.\n");
1da177e4 915
e428924c 916 if (IS_FWI2_CAPABLE(ha))
2afa19a9 917 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 918 else
73208dfd 919 qla2x00_process_response_queue(rsp);
1da177e4 920 break;
9a853f71
AV
921
922 case MBA_DISCARD_RND_FRAME:
7c3df132
SK
923 ql_dbg(ql_dbg_async, vha, 0x5016,
924 "Discard RND Frame -- %04x %04x %04x.\n",
925 mb[1], mb[2], mb[3]);
9a853f71 926 break;
45ebeb56
AV
927
928 case MBA_TRACE_NOTIFICATION:
7c3df132
SK
929 ql_dbg(ql_dbg_async, vha, 0x5017,
930 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
45ebeb56 931 break;
4d4df193
HK
932
933 case MBA_ISP84XX_ALERT:
7c3df132
SK
934 ql_dbg(ql_dbg_async, vha, 0x5018,
935 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
936 mb[1], mb[2], mb[3]);
4d4df193
HK
937
938 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
939 switch (mb[1]) {
940 case A84_PANIC_RECOVERY:
7c3df132
SK
941 ql_log(ql_log_info, vha, 0x5019,
942 "Alert 84XX: panic recovery %04x %04x.\n",
943 mb[2], mb[3]);
4d4df193
HK
944 break;
945 case A84_OP_LOGIN_COMPLETE:
946 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
947 ql_log(ql_log_info, vha, 0x501a,
948 "Alert 84XX: firmware version %x.\n",
949 ha->cs84xx->op_fw_version);
4d4df193
HK
950 break;
951 case A84_DIAG_LOGIN_COMPLETE:
952 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
953 ql_log(ql_log_info, vha, 0x501b,
954 "Alert 84XX: diagnostic firmware version %x.\n",
955 ha->cs84xx->diag_fw_version);
4d4df193
HK
956 break;
957 case A84_GOLD_LOGIN_COMPLETE:
958 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
959 ha->cs84xx->fw_update = 1;
7c3df132
SK
960 ql_log(ql_log_info, vha, 0x501c,
961 "Alert 84XX: gold firmware version %x.\n",
962 ha->cs84xx->gold_fw_version);
4d4df193
HK
963 break;
964 default:
7c3df132
SK
965 ql_log(ql_log_warn, vha, 0x501d,
966 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
4d4df193
HK
967 mb[1], mb[2], mb[3]);
968 }
969 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
970 break;
3a03eb79 971 case MBA_DCBX_START:
7c3df132
SK
972 ql_dbg(ql_dbg_async, vha, 0x501e,
973 "DCBX Started -- %04x %04x %04x.\n",
974 mb[1], mb[2], mb[3]);
3a03eb79
AV
975 break;
976 case MBA_DCBX_PARAM_UPDATE:
7c3df132
SK
977 ql_dbg(ql_dbg_async, vha, 0x501f,
978 "DCBX Parameters Updated -- %04x %04x %04x.\n",
979 mb[1], mb[2], mb[3]);
3a03eb79
AV
980 break;
981 case MBA_FCF_CONF_ERR:
7c3df132
SK
982 ql_dbg(ql_dbg_async, vha, 0x5020,
983 "FCF Configuration Error -- %04x %04x %04x.\n",
984 mb[1], mb[2], mb[3]);
3a03eb79 985 break;
3a03eb79 986 case MBA_IDC_NOTIFY:
8fcd6b8b
CD
987 /* See if we need to quiesce any I/O */
988 if (IS_QLA8031(vha->hw))
989 if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
990 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
991 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
992 /* Ack that we have quiesced I/O */
993 qla81xx_idc_event(vha, mb[0], mb[1]);
994 qla2xxx_wake_dpc(vha);
995 }
996 case MBA_IDC_COMPLETE:
3a03eb79 997 case MBA_IDC_TIME_EXT:
7d613ac6
SV
998 if (IS_QLA81XX(vha->hw))
999 qla81xx_idc_event(vha, mb[0], mb[1]);
1000 break;
1001
1002 case MBA_IDC_AEN:
1003 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1004 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1005 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1006 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1007 qla83xx_handle_8200_aen(vha, mb);
3a03eb79 1008 break;
7d613ac6 1009
6246b8a1
GM
1010 default:
1011 ql_dbg(ql_dbg_async, vha, 0x5057,
1012 "Unknown AEN:%04x %04x %04x %04x\n",
1013 mb[0], mb[1], mb[2], mb[3]);
1da177e4 1014 }
2c3dfe3f 1015
2d70c103
NB
1016 qlt_async_event(mb[0], vha, mb);
1017
e315cd28 1018 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 1019 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
1020}
1021
1022/**
1023 * qla2x00_process_completed_request() - Process a Fast Post response.
1024 * @ha: SCSI driver HA context
1025 * @index: SRB index
1026 */
1027static void
73208dfd
AC
1028qla2x00_process_completed_request(struct scsi_qla_host *vha,
1029 struct req_que *req, uint32_t index)
1da177e4
LT
1030{
1031 srb_t *sp;
e315cd28 1032 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1033
1034 /* Validate handle. */
1035 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
1036 ql_log(ql_log_warn, vha, 0x3014,
1037 "Invalid SCSI command index (%x).\n", index);
1da177e4 1038
8f7daead
GM
1039 if (IS_QLA82XX(ha))
1040 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1041 else
1042 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1043 return;
1044 }
1045
e315cd28 1046 sp = req->outstanding_cmds[index];
1da177e4
LT
1047 if (sp) {
1048 /* Free outstanding command slot. */
e315cd28 1049 req->outstanding_cmds[index] = NULL;
1da177e4 1050
1da177e4 1051 /* Save ISP completion status */
9ba56b95 1052 sp->done(ha, sp, DID_OK << 16);
1da177e4 1053 } else {
7c3df132 1054 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1da177e4 1055
8f7daead
GM
1056 if (IS_QLA82XX(ha))
1057 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1058 else
1059 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1060 }
1061}
1062
ac280b67
AV
1063static srb_t *
1064qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1065 struct req_que *req, void *iocb)
1066{
1067 struct qla_hw_data *ha = vha->hw;
1068 sts_entry_t *pkt = iocb;
1069 srb_t *sp = NULL;
1070 uint16_t index;
1071
1072 index = LSW(pkt->handle);
1073 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
1074 ql_log(ql_log_warn, vha, 0x5031,
1075 "Invalid command index (%x).\n", index);
8f7daead
GM
1076 if (IS_QLA82XX(ha))
1077 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1078 else
1079 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ac280b67
AV
1080 goto done;
1081 }
1082 sp = req->outstanding_cmds[index];
1083 if (!sp) {
7c3df132
SK
1084 ql_log(ql_log_warn, vha, 0x5032,
1085 "Invalid completion handle (%x) -- timed-out.\n", index);
ac280b67
AV
1086 return sp;
1087 }
1088 if (sp->handle != index) {
7c3df132
SK
1089 ql_log(ql_log_warn, vha, 0x5033,
1090 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
ac280b67
AV
1091 return NULL;
1092 }
9a069e19 1093
ac280b67 1094 req->outstanding_cmds[index] = NULL;
9a069e19 1095
ac280b67
AV
1096done:
1097 return sp;
1098}
1099
1100static void
1101qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1102 struct mbx_entry *mbx)
1103{
1104 const char func[] = "MBX-IOCB";
1105 const char *type;
ac280b67
AV
1106 fc_port_t *fcport;
1107 srb_t *sp;
4916392b 1108 struct srb_iocb *lio;
99b0bec7 1109 uint16_t *data;
5ff1d584 1110 uint16_t status;
ac280b67
AV
1111
1112 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1113 if (!sp)
1114 return;
1115
9ba56b95
GM
1116 lio = &sp->u.iocb_cmd;
1117 type = sp->name;
ac280b67 1118 fcport = sp->fcport;
4916392b 1119 data = lio->u.logio.data;
ac280b67 1120
5ff1d584 1121 data[0] = MBS_COMMAND_ERROR;
4916392b 1122 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1123 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1124 if (mbx->entry_status) {
7c3df132 1125 ql_dbg(ql_dbg_async, vha, 0x5043,
cfb0919c 1126 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
d3fa9e7d 1127 "entry-status=%x status=%x state-flag=%x "
cfb0919c
CD
1128 "status-flags=%x.\n", type, sp->handle,
1129 fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d
AV
1130 fcport->d_id.b.al_pa, mbx->entry_status,
1131 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
7c3df132 1132 le16_to_cpu(mbx->status_flags));
d3fa9e7d 1133
cfb0919c 1134 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
7c3df132 1135 (uint8_t *)mbx, sizeof(*mbx));
ac280b67 1136
99b0bec7 1137 goto logio_done;
ac280b67
AV
1138 }
1139
5ff1d584 1140 status = le16_to_cpu(mbx->status);
9ba56b95 1141 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
5ff1d584
AV
1142 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1143 status = 0;
1144 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
7c3df132 1145 ql_dbg(ql_dbg_async, vha, 0x5045,
cfb0919c
CD
1146 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1147 type, sp->handle, fcport->d_id.b.domain,
1148 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1149 le16_to_cpu(mbx->mb1));
ac280b67
AV
1150
1151 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1152 if (sp->type == SRB_LOGIN_CMD) {
99b0bec7
AV
1153 fcport->port_type = FCT_TARGET;
1154 if (le16_to_cpu(mbx->mb1) & BIT_0)
1155 fcport->port_type = FCT_INITIATOR;
6ac52608 1156 else if (le16_to_cpu(mbx->mb1) & BIT_1)
99b0bec7 1157 fcport->flags |= FCF_FCP2_DEVICE;
5ff1d584 1158 }
99b0bec7 1159 goto logio_done;
ac280b67
AV
1160 }
1161
1162 data[0] = le16_to_cpu(mbx->mb0);
1163 switch (data[0]) {
1164 case MBS_PORT_ID_USED:
1165 data[1] = le16_to_cpu(mbx->mb1);
1166 break;
1167 case MBS_LOOP_ID_USED:
1168 break;
1169 default:
1170 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1171 break;
1172 }
1173
7c3df132 1174 ql_log(ql_log_warn, vha, 0x5046,
cfb0919c
CD
1175 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1176 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1177 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1178 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
ac280b67 1179 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
7c3df132 1180 le16_to_cpu(mbx->mb7));
ac280b67 1181
99b0bec7 1182logio_done:
9ba56b95 1183 sp->done(vha, sp, 0);
ac280b67
AV
1184}
1185
9bc4f4fb
HZ
1186static void
1187qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1188 sts_entry_t *pkt, int iocb_type)
1189{
1190 const char func[] = "CT_IOCB";
1191 const char *type;
9bc4f4fb 1192 srb_t *sp;
9bc4f4fb
HZ
1193 struct fc_bsg_job *bsg_job;
1194 uint16_t comp_status;
9ba56b95 1195 int res;
9bc4f4fb
HZ
1196
1197 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1198 if (!sp)
1199 return;
1200
9ba56b95 1201 bsg_job = sp->u.bsg_job;
9bc4f4fb 1202
9ba56b95 1203 type = "ct pass-through";
9bc4f4fb
HZ
1204
1205 comp_status = le16_to_cpu(pkt->comp_status);
1206
1207 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1208 * fc payload to the caller
1209 */
1210 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1211 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1212
1213 if (comp_status != CS_COMPLETE) {
1214 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1215 res = DID_OK << 16;
9bc4f4fb
HZ
1216 bsg_job->reply->reply_payload_rcv_len =
1217 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1218
7c3df132
SK
1219 ql_log(ql_log_warn, vha, 0x5048,
1220 "CT pass-through-%s error "
9bc4f4fb 1221 "comp_status-status=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1222 type, comp_status,
1223 bsg_job->reply->reply_payload_rcv_len);
9bc4f4fb 1224 } else {
7c3df132
SK
1225 ql_log(ql_log_warn, vha, 0x5049,
1226 "CT pass-through-%s error "
1227 "comp_status-status=0x%x.\n", type, comp_status);
9ba56b95 1228 res = DID_ERROR << 16;
9bc4f4fb
HZ
1229 bsg_job->reply->reply_payload_rcv_len = 0;
1230 }
cfb0919c 1231 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
7c3df132 1232 (uint8_t *)pkt, sizeof(*pkt));
9bc4f4fb 1233 } else {
9ba56b95 1234 res = DID_OK << 16;
9bc4f4fb
HZ
1235 bsg_job->reply->reply_payload_rcv_len =
1236 bsg_job->reply_payload.payload_len;
1237 bsg_job->reply_len = 0;
1238 }
1239
9ba56b95 1240 sp->done(vha, sp, res);
9bc4f4fb
HZ
1241}
1242
9a069e19
GM
1243static void
1244qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1245 struct sts_entry_24xx *pkt, int iocb_type)
1246{
1247 const char func[] = "ELS_CT_IOCB";
1248 const char *type;
9a069e19 1249 srb_t *sp;
9a069e19
GM
1250 struct fc_bsg_job *bsg_job;
1251 uint16_t comp_status;
1252 uint32_t fw_status[3];
1253 uint8_t* fw_sts_ptr;
9ba56b95 1254 int res;
9a069e19
GM
1255
1256 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1257 if (!sp)
1258 return;
9ba56b95 1259 bsg_job = sp->u.bsg_job;
9a069e19
GM
1260
1261 type = NULL;
9ba56b95 1262 switch (sp->type) {
9a069e19
GM
1263 case SRB_ELS_CMD_RPT:
1264 case SRB_ELS_CMD_HST:
1265 type = "els";
1266 break;
1267 case SRB_CT_CMD:
1268 type = "ct pass-through";
1269 break;
1270 default:
37fed3ee 1271 ql_dbg(ql_dbg_user, vha, 0x503e,
9ba56b95 1272 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
9a069e19
GM
1273 return;
1274 }
1275
1276 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1277 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1278 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1279
1280 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1281 * fc payload to the caller
1282 */
1283 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1284 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1285
1286 if (comp_status != CS_COMPLETE) {
1287 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1288 res = DID_OK << 16;
9a069e19 1289 bsg_job->reply->reply_payload_rcv_len =
9ba56b95 1290 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
9a069e19 1291
37fed3ee 1292 ql_dbg(ql_dbg_user, vha, 0x503f,
cfb0919c 1293 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1294 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
cfb0919c 1295 type, sp->handle, comp_status, fw_status[1], fw_status[2],
7c3df132
SK
1296 le16_to_cpu(((struct els_sts_entry_24xx *)
1297 pkt)->total_byte_count));
9a069e19
GM
1298 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1299 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1300 }
1301 else {
37fed3ee 1302 ql_dbg(ql_dbg_user, vha, 0x5040,
cfb0919c 1303 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1304 "error subcode 1=0x%x error subcode 2=0x%x.\n",
cfb0919c 1305 type, sp->handle, comp_status,
7c3df132
SK
1306 le16_to_cpu(((struct els_sts_entry_24xx *)
1307 pkt)->error_subcode_1),
1308 le16_to_cpu(((struct els_sts_entry_24xx *)
1309 pkt)->error_subcode_2));
9ba56b95 1310 res = DID_ERROR << 16;
9a069e19
GM
1311 bsg_job->reply->reply_payload_rcv_len = 0;
1312 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1313 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1314 }
37fed3ee 1315 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
7c3df132 1316 (uint8_t *)pkt, sizeof(*pkt));
9a069e19
GM
1317 }
1318 else {
9ba56b95 1319 res = DID_OK << 16;
9a069e19
GM
1320 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1321 bsg_job->reply_len = 0;
1322 }
1323
9ba56b95 1324 sp->done(vha, sp, res);
9a069e19
GM
1325}
1326
ac280b67
AV
1327static void
1328qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1329 struct logio_entry_24xx *logio)
1330{
1331 const char func[] = "LOGIO-IOCB";
1332 const char *type;
ac280b67
AV
1333 fc_port_t *fcport;
1334 srb_t *sp;
4916392b 1335 struct srb_iocb *lio;
99b0bec7 1336 uint16_t *data;
ac280b67
AV
1337 uint32_t iop[2];
1338
1339 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1340 if (!sp)
1341 return;
1342
9ba56b95
GM
1343 lio = &sp->u.iocb_cmd;
1344 type = sp->name;
ac280b67 1345 fcport = sp->fcport;
4916392b 1346 data = lio->u.logio.data;
ac280b67 1347
5ff1d584 1348 data[0] = MBS_COMMAND_ERROR;
4916392b 1349 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1350 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1351 if (logio->entry_status) {
5e19ed90 1352 ql_log(ql_log_warn, fcport->vha, 0x5034,
cfb0919c 1353 "Async-%s error entry - hdl=%x"
d3fa9e7d 1354 "portid=%02x%02x%02x entry-status=%x.\n",
cfb0919c
CD
1355 type, sp->handle, fcport->d_id.b.domain,
1356 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1357 logio->entry_status);
1358 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
7c3df132 1359 (uint8_t *)logio, sizeof(*logio));
ac280b67 1360
99b0bec7 1361 goto logio_done;
ac280b67
AV
1362 }
1363
1364 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
5e19ed90 1365 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
cfb0919c
CD
1366 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1367 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1368 fcport->d_id.b.area, fcport->d_id.b.al_pa,
7c3df132 1369 le32_to_cpu(logio->io_parameter[0]));
ac280b67
AV
1370
1371 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1372 if (sp->type != SRB_LOGIN_CMD)
99b0bec7 1373 goto logio_done;
ac280b67
AV
1374
1375 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1376 if (iop[0] & BIT_4) {
1377 fcport->port_type = FCT_TARGET;
1378 if (iop[0] & BIT_8)
8474f3a0 1379 fcport->flags |= FCF_FCP2_DEVICE;
b0cd579c 1380 } else if (iop[0] & BIT_5)
ac280b67 1381 fcport->port_type = FCT_INITIATOR;
b0cd579c 1382
2d70c103
NB
1383 if (iop[0] & BIT_7)
1384 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1385
ac280b67
AV
1386 if (logio->io_parameter[7] || logio->io_parameter[8])
1387 fcport->supported_classes |= FC_COS_CLASS2;
1388 if (logio->io_parameter[9] || logio->io_parameter[10])
1389 fcport->supported_classes |= FC_COS_CLASS3;
1390
99b0bec7 1391 goto logio_done;
ac280b67
AV
1392 }
1393
1394 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1395 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1396 switch (iop[0]) {
1397 case LSC_SCODE_PORTID_USED:
1398 data[0] = MBS_PORT_ID_USED;
1399 data[1] = LSW(iop[1]);
1400 break;
1401 case LSC_SCODE_NPORT_USED:
1402 data[0] = MBS_LOOP_ID_USED;
1403 break;
ac280b67
AV
1404 default:
1405 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1406 break;
1407 }
1408
5e19ed90 1409 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
cfb0919c
CD
1410 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1411 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
d3fa9e7d 1412 fcport->d_id.b.area, fcport->d_id.b.al_pa,
ac280b67
AV
1413 le16_to_cpu(logio->comp_status),
1414 le32_to_cpu(logio->io_parameter[0]),
7c3df132 1415 le32_to_cpu(logio->io_parameter[1]));
ac280b67 1416
99b0bec7 1417logio_done:
9ba56b95 1418 sp->done(vha, sp, 0);
ac280b67
AV
1419}
1420
3822263e
MI
1421static void
1422qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1423 struct tsk_mgmt_entry *tsk)
1424{
1425 const char func[] = "TMF-IOCB";
1426 const char *type;
1427 fc_port_t *fcport;
1428 srb_t *sp;
1429 struct srb_iocb *iocb;
3822263e
MI
1430 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1431 int error = 1;
1432
1433 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1434 if (!sp)
1435 return;
1436
9ba56b95
GM
1437 iocb = &sp->u.iocb_cmd;
1438 type = sp->name;
3822263e
MI
1439 fcport = sp->fcport;
1440
1441 if (sts->entry_status) {
5e19ed90 1442 ql_log(ql_log_warn, fcport->vha, 0x5038,
cfb0919c
CD
1443 "Async-%s error - hdl=%x entry-status(%x).\n",
1444 type, sp->handle, sts->entry_status);
3822263e 1445 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
5e19ed90 1446 ql_log(ql_log_warn, fcport->vha, 0x5039,
cfb0919c
CD
1447 "Async-%s error - hdl=%x completion status(%x).\n",
1448 type, sp->handle, sts->comp_status);
3822263e
MI
1449 } else if (!(le16_to_cpu(sts->scsi_status) &
1450 SS_RESPONSE_INFO_LEN_VALID)) {
5e19ed90 1451 ql_log(ql_log_warn, fcport->vha, 0x503a,
cfb0919c
CD
1452 "Async-%s error - hdl=%x no response info(%x).\n",
1453 type, sp->handle, sts->scsi_status);
3822263e 1454 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
5e19ed90 1455 ql_log(ql_log_warn, fcport->vha, 0x503b,
cfb0919c
CD
1456 "Async-%s error - hdl=%x not enough response(%d).\n",
1457 type, sp->handle, sts->rsp_data_len);
3822263e 1458 } else if (sts->data[3]) {
5e19ed90 1459 ql_log(ql_log_warn, fcport->vha, 0x503c,
cfb0919c
CD
1460 "Async-%s error - hdl=%x response(%x).\n",
1461 type, sp->handle, sts->data[3]);
3822263e
MI
1462 } else {
1463 error = 0;
1464 }
1465
1466 if (error) {
1467 iocb->u.tmf.data = error;
7c3df132
SK
1468 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1469 (uint8_t *)sts, sizeof(*sts));
3822263e
MI
1470 }
1471
9ba56b95 1472 sp->done(vha, sp, 0);
3822263e
MI
1473}
1474
1da177e4
LT
1475/**
1476 * qla2x00_process_response_queue() - Process response queue entries.
1477 * @ha: SCSI driver HA context
1478 */
1479void
73208dfd 1480qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 1481{
73208dfd
AC
1482 struct scsi_qla_host *vha;
1483 struct qla_hw_data *ha = rsp->hw;
3d71644c 1484 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1485 sts_entry_t *pkt;
1486 uint16_t handle_cnt;
1487 uint16_t cnt;
73208dfd 1488
2afa19a9 1489 vha = pci_get_drvdata(ha->pdev);
1da177e4 1490
e315cd28 1491 if (!vha->flags.online)
1da177e4
LT
1492 return;
1493
e315cd28
AC
1494 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1495 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 1496
e315cd28
AC
1497 rsp->ring_index++;
1498 if (rsp->ring_index == rsp->length) {
1499 rsp->ring_index = 0;
1500 rsp->ring_ptr = rsp->ring;
1da177e4 1501 } else {
e315cd28 1502 rsp->ring_ptr++;
1da177e4
LT
1503 }
1504
1505 if (pkt->entry_status != 0) {
73208dfd 1506 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
1507 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1508 wmb();
1509 continue;
1510 }
1511
1512 switch (pkt->entry_type) {
1513 case STATUS_TYPE:
73208dfd 1514 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
1515 break;
1516 case STATUS_TYPE_21:
1517 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1518 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1519 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1520 ((sts21_entry_t *)pkt)->handle[cnt]);
1521 }
1522 break;
1523 case STATUS_TYPE_22:
1524 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1525 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1526 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1527 ((sts22_entry_t *)pkt)->handle[cnt]);
1528 }
1529 break;
1530 case STATUS_CONT_TYPE:
2afa19a9 1531 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1da177e4 1532 break;
ac280b67
AV
1533 case MBX_IOCB_TYPE:
1534 qla2x00_mbx_iocb_entry(vha, rsp->req,
1535 (struct mbx_entry *)pkt);
3822263e 1536 break;
9bc4f4fb
HZ
1537 case CT_IOCB_TYPE:
1538 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1539 break;
1da177e4
LT
1540 default:
1541 /* Type Not Supported. */
7c3df132
SK
1542 ql_log(ql_log_warn, vha, 0x504a,
1543 "Received unknown response pkt type %x "
1da177e4 1544 "entry status=%x.\n",
7c3df132 1545 pkt->entry_type, pkt->entry_status);
1da177e4
LT
1546 break;
1547 }
1548 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1549 wmb();
1550 }
1551
1552 /* Adjust ring index */
e315cd28 1553 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
1554}
1555
4733fcb1 1556static inline void
5544213b 1557qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
9ba56b95 1558 uint32_t sense_len, struct rsp_que *rsp, int res)
4733fcb1 1559{
7c3df132 1560 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95
GM
1561 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1562 uint32_t track_sense_len;
4733fcb1
AV
1563
1564 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1565 sense_len = SCSI_SENSE_BUFFERSIZE;
1566
9ba56b95
GM
1567 SET_CMD_SENSE_LEN(sp, sense_len);
1568 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1569 track_sense_len = sense_len;
1570
1571 if (sense_len > par_sense_len)
5544213b 1572 sense_len = par_sense_len;
4733fcb1
AV
1573
1574 memcpy(cp->sense_buffer, sense_data, sense_len);
1575
9ba56b95
GM
1576 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1577 track_sense_len -= sense_len;
1578 SET_CMD_SENSE_LEN(sp, track_sense_len);
1579
1580 if (track_sense_len != 0) {
2afa19a9 1581 rsp->status_srb = sp;
9ba56b95
GM
1582 cp->result = res;
1583 }
4733fcb1 1584
cfb0919c
CD
1585 if (sense_len) {
1586 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1587 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1588 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1589 cp);
7c3df132
SK
1590 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1591 cp->sense_buffer, sense_len);
cfb0919c 1592 }
4733fcb1
AV
1593}
1594
bad75002
AE
1595struct scsi_dif_tuple {
1596 __be16 guard; /* Checksum */
d6a03581 1597 __be16 app_tag; /* APPL identifier */
bad75002
AE
1598 __be32 ref_tag; /* Target LBA or indirect LBA */
1599};
1600
1601/*
1602 * Checks the guard or meta-data for the type of error
1603 * detected by the HBA. In case of errors, we set the
1604 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1605 * to indicate to the kernel that the HBA detected error.
1606 */
8cb2049c 1607static inline int
bad75002
AE
1608qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1609{
7c3df132 1610 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95 1611 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
8cb2049c
AE
1612 uint8_t *ap = &sts24->data[12];
1613 uint8_t *ep = &sts24->data[20];
bad75002
AE
1614 uint32_t e_ref_tag, a_ref_tag;
1615 uint16_t e_app_tag, a_app_tag;
1616 uint16_t e_guard, a_guard;
1617
8cb2049c
AE
1618 /*
1619 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1620 * would make guard field appear at offset 2
1621 */
1622 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1623 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1624 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1625 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1626 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1627 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
bad75002 1628
7c3df132
SK
1629 ql_dbg(ql_dbg_io, vha, 0x3023,
1630 "iocb(s) %p Returned STATUS.\n", sts24);
bad75002 1631
7c3df132
SK
1632 ql_dbg(ql_dbg_io, vha, 0x3024,
1633 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
bad75002 1634 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
7c3df132 1635 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
bad75002 1636 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
7c3df132 1637 a_app_tag, e_app_tag, a_guard, e_guard);
bad75002 1638
8cb2049c
AE
1639 /*
1640 * Ignore sector if:
1641 * For type 3: ref & app tag is all 'f's
1642 * For type 0,1,2: app tag is all 'f's
1643 */
1644 if ((a_app_tag == 0xffff) &&
1645 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1646 (a_ref_tag == 0xffffffff))) {
1647 uint32_t blocks_done, resid;
1648 sector_t lba_s = scsi_get_lba(cmd);
1649
1650 /* 2TB boundary case covered automatically with this */
1651 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1652
1653 resid = scsi_bufflen(cmd) - (blocks_done *
1654 cmd->device->sector_size);
1655
1656 scsi_set_resid(cmd, resid);
1657 cmd->result = DID_OK << 16;
1658
1659 /* Update protection tag */
1660 if (scsi_prot_sg_count(cmd)) {
1661 uint32_t i, j = 0, k = 0, num_ent;
1662 struct scatterlist *sg;
1663 struct sd_dif_tuple *spt;
1664
1665 /* Patch the corresponding protection tags */
1666 scsi_for_each_prot_sg(cmd, sg,
1667 scsi_prot_sg_count(cmd), i) {
1668 num_ent = sg_dma_len(sg) / 8;
1669 if (k + num_ent < blocks_done) {
1670 k += num_ent;
1671 continue;
1672 }
1673 j = blocks_done - k - 1;
1674 k = blocks_done;
1675 break;
1676 }
1677
1678 if (k != blocks_done) {
cfb0919c 1679 ql_log(ql_log_warn, vha, 0x302f,
8ec9c7fb
RD
1680 "unexpected tag values tag:lba=%x:%llx)\n",
1681 e_ref_tag, (unsigned long long)lba_s);
8cb2049c
AE
1682 return 1;
1683 }
1684
1685 spt = page_address(sg_page(sg)) + sg->offset;
1686 spt += j;
1687
1688 spt->app_tag = 0xffff;
1689 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1690 spt->ref_tag = 0xffffffff;
1691 }
1692
1693 return 0;
1694 }
1695
bad75002
AE
1696 /* check guard */
1697 if (e_guard != a_guard) {
1698 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1699 0x10, 0x1);
1700 set_driver_byte(cmd, DRIVER_SENSE);
1701 set_host_byte(cmd, DID_ABORT);
1702 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1703 return 1;
bad75002
AE
1704 }
1705
e02587d7
AE
1706 /* check ref tag */
1707 if (e_ref_tag != a_ref_tag) {
bad75002 1708 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1709 0x10, 0x3);
bad75002
AE
1710 set_driver_byte(cmd, DRIVER_SENSE);
1711 set_host_byte(cmd, DID_ABORT);
1712 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1713 return 1;
bad75002
AE
1714 }
1715
e02587d7
AE
1716 /* check appl tag */
1717 if (e_app_tag != a_app_tag) {
bad75002 1718 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1719 0x10, 0x2);
bad75002
AE
1720 set_driver_byte(cmd, DRIVER_SENSE);
1721 set_host_byte(cmd, DID_ABORT);
1722 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1723 return 1;
bad75002 1724 }
e02587d7 1725
8cb2049c 1726 return 1;
bad75002
AE
1727}
1728
a9b6f722
SK
1729static void
1730qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1731 struct req_que *req, uint32_t index)
1732{
1733 struct qla_hw_data *ha = vha->hw;
1734 srb_t *sp;
1735 uint16_t comp_status;
1736 uint16_t scsi_status;
1737 uint16_t thread_id;
1738 uint32_t rval = EXT_STATUS_OK;
1739 struct fc_bsg_job *bsg_job = NULL;
1740 sts_entry_t *sts;
1741 struct sts_entry_24xx *sts24;
1742 sts = (sts_entry_t *) pkt;
1743 sts24 = (struct sts_entry_24xx *) pkt;
1744
1745 /* Validate handle. */
1746 if (index >= MAX_OUTSTANDING_COMMANDS) {
1747 ql_log(ql_log_warn, vha, 0x70af,
1748 "Invalid SCSI completion handle 0x%x.\n", index);
1749 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1750 return;
1751 }
1752
1753 sp = req->outstanding_cmds[index];
1754 if (sp) {
1755 /* Free outstanding command slot. */
1756 req->outstanding_cmds[index] = NULL;
1757 bsg_job = sp->u.bsg_job;
1758 } else {
1759 ql_log(ql_log_warn, vha, 0x70b0,
1760 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1761 req->id, index);
1762
1763 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1764 return;
1765 }
1766
1767 if (IS_FWI2_CAPABLE(ha)) {
1768 comp_status = le16_to_cpu(sts24->comp_status);
1769 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1770 } else {
1771 comp_status = le16_to_cpu(sts->comp_status);
1772 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1773 }
1774
1775 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1776 switch (comp_status) {
1777 case CS_COMPLETE:
1778 if (scsi_status == 0) {
1779 bsg_job->reply->reply_payload_rcv_len =
1780 bsg_job->reply_payload.payload_len;
1781 rval = EXT_STATUS_OK;
1782 }
1783 goto done;
1784
1785 case CS_DATA_OVERRUN:
1786 ql_dbg(ql_dbg_user, vha, 0x70b1,
1787 "Command completed with date overrun thread_id=%d\n",
1788 thread_id);
1789 rval = EXT_STATUS_DATA_OVERRUN;
1790 break;
1791
1792 case CS_DATA_UNDERRUN:
1793 ql_dbg(ql_dbg_user, vha, 0x70b2,
1794 "Command completed with date underrun thread_id=%d\n",
1795 thread_id);
1796 rval = EXT_STATUS_DATA_UNDERRUN;
1797 break;
1798 case CS_BIDIR_RD_OVERRUN:
1799 ql_dbg(ql_dbg_user, vha, 0x70b3,
1800 "Command completed with read data overrun thread_id=%d\n",
1801 thread_id);
1802 rval = EXT_STATUS_DATA_OVERRUN;
1803 break;
1804
1805 case CS_BIDIR_RD_WR_OVERRUN:
1806 ql_dbg(ql_dbg_user, vha, 0x70b4,
1807 "Command completed with read and write data overrun "
1808 "thread_id=%d\n", thread_id);
1809 rval = EXT_STATUS_DATA_OVERRUN;
1810 break;
1811
1812 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1813 ql_dbg(ql_dbg_user, vha, 0x70b5,
1814 "Command completed with read data over and write data "
1815 "underrun thread_id=%d\n", thread_id);
1816 rval = EXT_STATUS_DATA_OVERRUN;
1817 break;
1818
1819 case CS_BIDIR_RD_UNDERRUN:
1820 ql_dbg(ql_dbg_user, vha, 0x70b6,
1821 "Command completed with read data data underrun "
1822 "thread_id=%d\n", thread_id);
1823 rval = EXT_STATUS_DATA_UNDERRUN;
1824 break;
1825
1826 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1827 ql_dbg(ql_dbg_user, vha, 0x70b7,
1828 "Command completed with read data under and write data "
1829 "overrun thread_id=%d\n", thread_id);
1830 rval = EXT_STATUS_DATA_UNDERRUN;
1831 break;
1832
1833 case CS_BIDIR_RD_WR_UNDERRUN:
1834 ql_dbg(ql_dbg_user, vha, 0x70b8,
1835 "Command completed with read and write data underrun "
1836 "thread_id=%d\n", thread_id);
1837 rval = EXT_STATUS_DATA_UNDERRUN;
1838 break;
1839
1840 case CS_BIDIR_DMA:
1841 ql_dbg(ql_dbg_user, vha, 0x70b9,
1842 "Command completed with data DMA error thread_id=%d\n",
1843 thread_id);
1844 rval = EXT_STATUS_DMA_ERR;
1845 break;
1846
1847 case CS_TIMEOUT:
1848 ql_dbg(ql_dbg_user, vha, 0x70ba,
1849 "Command completed with timeout thread_id=%d\n",
1850 thread_id);
1851 rval = EXT_STATUS_TIMEOUT;
1852 break;
1853 default:
1854 ql_dbg(ql_dbg_user, vha, 0x70bb,
1855 "Command completed with completion status=0x%x "
1856 "thread_id=%d\n", comp_status, thread_id);
1857 rval = EXT_STATUS_ERR;
1858 break;
1859 }
1860 bsg_job->reply->reply_payload_rcv_len = 0;
1861
1862done:
1863 /* Return the vendor specific reply to API */
1864 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1865 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1866 /* Always return DID_OK, bsg will send the vendor specific response
1867 * in this case only */
1868 sp->done(vha, sp, (DID_OK << 6));
1869
1870}
1871
1da177e4
LT
1872/**
1873 * qla2x00_status_entry() - Process a Status IOCB entry.
1874 * @ha: SCSI driver HA context
1875 * @pkt: Entry pointer
1876 */
1877static void
73208dfd 1878qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1879{
1da177e4 1880 srb_t *sp;
1da177e4
LT
1881 fc_port_t *fcport;
1882 struct scsi_cmnd *cp;
9a853f71
AV
1883 sts_entry_t *sts;
1884 struct sts_entry_24xx *sts24;
1da177e4
LT
1885 uint16_t comp_status;
1886 uint16_t scsi_status;
b7d2280c 1887 uint16_t ox_id;
1da177e4
LT
1888 uint8_t lscsi_status;
1889 int32_t resid;
5544213b
AV
1890 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1891 fw_resid_len;
9a853f71 1892 uint8_t *rsp_info, *sense_data;
e315cd28 1893 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1894 uint32_t handle;
1895 uint16_t que;
1896 struct req_que *req;
b7d2280c 1897 int logit = 1;
9ba56b95 1898 int res = 0;
a9b6f722 1899 uint16_t state_flags = 0;
9a853f71
AV
1900
1901 sts = (sts_entry_t *) pkt;
1902 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1903 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1904 comp_status = le16_to_cpu(sts24->comp_status);
1905 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
a9b6f722 1906 state_flags = le16_to_cpu(sts24->state_flags);
9a853f71
AV
1907 } else {
1908 comp_status = le16_to_cpu(sts->comp_status);
1909 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1910 }
2afa19a9
AC
1911 handle = (uint32_t) LSW(sts->handle);
1912 que = MSW(sts->handle);
1913 req = ha->req_q_map[que];
a9083016 1914
1da177e4 1915 /* Validate handle. */
2afa19a9
AC
1916 if (handle < MAX_OUTSTANDING_COMMANDS) {
1917 sp = req->outstanding_cmds[handle];
1da177e4
LT
1918 } else
1919 sp = NULL;
1920
1921 if (sp == NULL) {
cfb0919c 1922 ql_dbg(ql_dbg_io, vha, 0x3017,
7c3df132 1923 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1924
8f7daead
GM
1925 if (IS_QLA82XX(ha))
1926 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1927 else
1928 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1929 qla2xxx_wake_dpc(vha);
1da177e4
LT
1930 return;
1931 }
a9b6f722
SK
1932
1933 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1934 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1935 return;
1936 }
1937
1938 /* Fast path completion. */
1939 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1940 qla2x00_process_completed_request(vha, req, handle);
1941
1942 return;
1943 }
1944
1945 req->outstanding_cmds[handle] = NULL;
9ba56b95 1946 cp = GET_CMD_SP(sp);
1da177e4 1947 if (cp == NULL) {
cfb0919c 1948 ql_dbg(ql_dbg_io, vha, 0x3018,
7c3df132
SK
1949 "Command already returned (0x%x/%p).\n",
1950 sts->handle, sp);
1da177e4
LT
1951
1952 return;
1953 }
1954
9a853f71 1955 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1956
bdf79621 1957 fcport = sp->fcport;
1da177e4 1958
b7d2280c 1959 ox_id = 0;
5544213b
AV
1960 sense_len = par_sense_len = rsp_info_len = resid_len =
1961 fw_resid_len = 0;
e428924c 1962 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
1963 if (scsi_status & SS_SENSE_LEN_VALID)
1964 sense_len = le32_to_cpu(sts24->sense_len);
1965 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1966 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1967 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1968 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1969 if (comp_status == CS_DATA_UNDERRUN)
1970 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1971 rsp_info = sts24->data;
1972 sense_data = sts24->data;
1973 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 1974 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 1975 par_sense_len = sizeof(sts24->data);
9a853f71 1976 } else {
0f00a206
LC
1977 if (scsi_status & SS_SENSE_LEN_VALID)
1978 sense_len = le16_to_cpu(sts->req_sense_length);
1979 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1980 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
1981 resid_len = le32_to_cpu(sts->residual_length);
1982 rsp_info = sts->rsp_info;
1983 sense_data = sts->req_sense_data;
5544213b 1984 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
1985 }
1986
1da177e4
LT
1987 /* Check for any FCP transport errors. */
1988 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1989 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 1990 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 1991 sense_data += rsp_info_len;
5544213b
AV
1992 par_sense_len -= rsp_info_len;
1993 }
9a853f71 1994 if (rsp_info_len > 3 && rsp_info[3]) {
5e19ed90 1995 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
7c3df132
SK
1996 "FCP I/O protocol failure (0x%x/0x%x).\n",
1997 rsp_info_len, rsp_info[3]);
1da177e4 1998
9ba56b95 1999 res = DID_BUS_BUSY << 16;
b7d2280c 2000 goto out;
1da177e4
LT
2001 }
2002 }
2003
3e8ce320
AV
2004 /* Check for overrun. */
2005 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2006 scsi_status & SS_RESIDUAL_OVER)
2007 comp_status = CS_DATA_OVERRUN;
2008
1da177e4
LT
2009 /*
2010 * Based on Host and scsi status generate status code for Linux
2011 */
2012 switch (comp_status) {
2013 case CS_COMPLETE:
df7baa50 2014 case CS_QUEUE_FULL:
1da177e4 2015 if (scsi_status == 0) {
9ba56b95 2016 res = DID_OK << 16;
1da177e4
LT
2017 break;
2018 }
2019 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 2020 resid = resid_len;
385d70b4 2021 scsi_set_resid(cp, resid);
0da69df1
AV
2022
2023 if (!lscsi_status &&
385d70b4 2024 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 2025 cp->underflow)) {
5e19ed90 2026 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
7c3df132 2027 "Mid-layer underflow "
b7d2280c 2028 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2029 resid, scsi_bufflen(cp));
0da69df1 2030
9ba56b95 2031 res = DID_ERROR << 16;
0da69df1
AV
2032 break;
2033 }
1da177e4 2034 }
9ba56b95 2035 res = DID_OK << 16 | lscsi_status;
1da177e4 2036
df7baa50 2037 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2038 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
7c3df132 2039 "QUEUE FULL detected.\n");
df7baa50
AV
2040 break;
2041 }
b7d2280c 2042 logit = 0;
1da177e4
LT
2043 if (lscsi_status != SS_CHECK_CONDITION)
2044 break;
2045
b80ca4f7 2046 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2047 if (!(scsi_status & SS_SENSE_LEN_VALID))
2048 break;
2049
5544213b 2050 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
9ba56b95 2051 rsp, res);
1da177e4
LT
2052 break;
2053
2054 case CS_DATA_UNDERRUN:
ed17c71b 2055 /* Use F/W calculated residual length. */
0f00a206
LC
2056 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2057 scsi_set_resid(cp, resid);
2058 if (scsi_status & SS_RESIDUAL_UNDER) {
2059 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
5e19ed90 2060 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
7c3df132
SK
2061 "Dropped frame(s) detected "
2062 "(0x%x of 0x%x bytes).\n",
2063 resid, scsi_bufflen(cp));
0f00a206 2064
9ba56b95 2065 res = DID_ERROR << 16 | lscsi_status;
4e85e3d9 2066 goto check_scsi_status;
6acf8190 2067 }
ed17c71b 2068
0f00a206
LC
2069 if (!lscsi_status &&
2070 ((unsigned)(scsi_bufflen(cp) - resid) <
2071 cp->underflow)) {
5e19ed90 2072 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
7c3df132 2073 "Mid-layer underflow "
b7d2280c 2074 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2075 resid, scsi_bufflen(cp));
e038a1be 2076
9ba56b95 2077 res = DID_ERROR << 16;
0f00a206
LC
2078 break;
2079 }
4aee5766
GM
2080 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2081 lscsi_status != SAM_STAT_BUSY) {
2082 /*
2083 * scsi status of task set and busy are considered to be
2084 * task not completed.
2085 */
2086
5e19ed90 2087 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
7c3df132 2088 "Dropped frame(s) detected (0x%x "
4aee5766
GM
2089 "of 0x%x bytes).\n", resid,
2090 scsi_bufflen(cp));
0f00a206 2091
9ba56b95 2092 res = DID_ERROR << 16 | lscsi_status;
0374f55e 2093 goto check_scsi_status;
4aee5766
GM
2094 } else {
2095 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2096 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2097 scsi_status, lscsi_status);
1da177e4
LT
2098 }
2099
9ba56b95 2100 res = DID_OK << 16 | lscsi_status;
b7d2280c 2101 logit = 0;
0f00a206 2102
0374f55e 2103check_scsi_status:
1da177e4 2104 /*
fa2a1ce5 2105 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
2106 * Status.
2107 */
2108 if (lscsi_status != 0) {
ffec28a3 2109 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2110 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
7c3df132 2111 "QUEUE FULL detected.\n");
b7d2280c 2112 logit = 1;
ffec28a3
AV
2113 break;
2114 }
1da177e4
LT
2115 if (lscsi_status != SS_CHECK_CONDITION)
2116 break;
2117
b80ca4f7 2118 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2119 if (!(scsi_status & SS_SENSE_LEN_VALID))
2120 break;
2121
5544213b 2122 qla2x00_handle_sense(sp, sense_data, par_sense_len,
9ba56b95 2123 sense_len, rsp, res);
1da177e4
LT
2124 }
2125 break;
2126
1da177e4
LT
2127 case CS_PORT_LOGGED_OUT:
2128 case CS_PORT_CONFIG_CHG:
2129 case CS_PORT_BUSY:
2130 case CS_INCOMPLETE:
2131 case CS_PORT_UNAVAILABLE:
b7d2280c 2132 case CS_TIMEOUT:
ff454b01
CD
2133 case CS_RESET:
2134
056a4483
MC
2135 /*
2136 * We are going to have the fc class block the rport
2137 * while we try to recover so instruct the mid layer
2138 * to requeue until the class decides how to handle this.
2139 */
9ba56b95 2140 res = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
2141
2142 if (comp_status == CS_TIMEOUT) {
2143 if (IS_FWI2_CAPABLE(ha))
2144 break;
2145 else if ((le16_to_cpu(sts->status_flags) &
2146 SF_LOGOUT_SENT) == 0)
2147 break;
2148 }
2149
5e19ed90 2150 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
7c3df132
SK
2151 "Port down status: port-state=0x%x.\n",
2152 atomic_read(&fcport->state));
b7d2280c 2153
a7a28504 2154 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 2155 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
2156 break;
2157
1da177e4 2158 case CS_ABORTED:
9ba56b95 2159 res = DID_RESET << 16;
1da177e4 2160 break;
bad75002
AE
2161
2162 case CS_DIF_ERROR:
8cb2049c 2163 logit = qla2x00_handle_dif_error(sp, sts24);
bad75002 2164 break;
1da177e4 2165 default:
9ba56b95 2166 res = DID_ERROR << 16;
1da177e4
LT
2167 break;
2168 }
2169
b7d2280c
AV
2170out:
2171 if (logit)
5e19ed90 2172 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
7c3df132 2173 "FCP command status: 0x%x-0x%x (0x%x) "
cfb0919c
CD
2174 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2175 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
7c3df132 2176 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
9ba56b95 2177 comp_status, scsi_status, res, vha->host_no,
cfb0919c
CD
2178 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2179 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2180 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2181 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2182 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
7c3df132 2183 resid_len, fw_resid_len);
b7d2280c 2184
2afa19a9 2185 if (rsp->status_srb == NULL)
9ba56b95 2186 sp->done(ha, sp, res);
1da177e4
LT
2187}
2188
2189/**
2190 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2191 * @ha: SCSI driver HA context
2192 * @pkt: Entry pointer
2193 *
2194 * Extended sense data.
2195 */
2196static void
2afa19a9 2197qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4 2198{
9ba56b95 2199 uint8_t sense_sz = 0;
2afa19a9 2200 struct qla_hw_data *ha = rsp->hw;
7c3df132 2201 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
9ba56b95 2202 srb_t *sp = rsp->status_srb;
1da177e4 2203 struct scsi_cmnd *cp;
9ba56b95
GM
2204 uint32_t sense_len;
2205 uint8_t *sense_ptr;
1da177e4 2206
9ba56b95
GM
2207 if (!sp || !GET_CMD_SENSE_LEN(sp))
2208 return;
1da177e4 2209
9ba56b95
GM
2210 sense_len = GET_CMD_SENSE_LEN(sp);
2211 sense_ptr = GET_CMD_SENSE_PTR(sp);
1da177e4 2212
9ba56b95
GM
2213 cp = GET_CMD_SP(sp);
2214 if (cp == NULL) {
2215 ql_log(ql_log_warn, vha, 0x3025,
2216 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1da177e4 2217
9ba56b95
GM
2218 rsp->status_srb = NULL;
2219 return;
1da177e4 2220 }
1da177e4 2221
9ba56b95
GM
2222 if (sense_len > sizeof(pkt->data))
2223 sense_sz = sizeof(pkt->data);
2224 else
2225 sense_sz = sense_len;
c4631191 2226
9ba56b95
GM
2227 /* Move sense data. */
2228 if (IS_FWI2_CAPABLE(ha))
2229 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2230 memcpy(sense_ptr, pkt->data, sense_sz);
2231 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2232 sense_ptr, sense_sz);
c4631191 2233
9ba56b95
GM
2234 sense_len -= sense_sz;
2235 sense_ptr += sense_sz;
c4631191 2236
9ba56b95
GM
2237 SET_CMD_SENSE_PTR(sp, sense_ptr);
2238 SET_CMD_SENSE_LEN(sp, sense_len);
2239
2240 /* Place command on done queue. */
2241 if (sense_len == 0) {
2242 rsp->status_srb = NULL;
2243 sp->done(ha, sp, cp->result);
c4631191 2244 }
c4631191
GM
2245}
2246
1da177e4
LT
2247/**
2248 * qla2x00_error_entry() - Process an error entry.
2249 * @ha: SCSI driver HA context
2250 * @pkt: Entry pointer
2251 */
2252static void
73208dfd 2253qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
2254{
2255 srb_t *sp;
e315cd28 2256 struct qla_hw_data *ha = vha->hw;
c4631191 2257 const char func[] = "ERROR-IOCB";
2afa19a9 2258 uint16_t que = MSW(pkt->handle);
a6fe35c0 2259 struct req_que *req = NULL;
9ba56b95 2260 int res = DID_ERROR << 16;
7c3df132 2261
9ba56b95
GM
2262 ql_dbg(ql_dbg_async, vha, 0x502a,
2263 "type of error status in response: 0x%x\n", pkt->entry_status);
2264
a6fe35c0
AE
2265 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2266 goto fatal;
2267
2268 req = ha->req_q_map[que];
2269
9ba56b95
GM
2270 if (pkt->entry_status & RF_BUSY)
2271 res = DID_BUS_BUSY << 16;
1da177e4 2272
c4631191 2273 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
a6fe35c0 2274 if (sp) {
9ba56b95 2275 sp->done(ha, sp, res);
a6fe35c0 2276 return;
1da177e4 2277 }
a6fe35c0
AE
2278fatal:
2279 ql_log(ql_log_warn, vha, 0x5030,
2280 "Error entry - invalid handle/queue.\n");
2281
2282 if (IS_QLA82XX(ha))
2283 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2284 else
2285 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2286 qla2xxx_wake_dpc(vha);
1da177e4
LT
2287}
2288
9a853f71
AV
2289/**
2290 * qla24xx_mbx_completion() - Process mailbox command completions.
2291 * @ha: SCSI driver HA context
2292 * @mb0: Mailbox0 register
2293 */
2294static void
e315cd28 2295qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
2296{
2297 uint16_t cnt;
4fa94f83 2298 uint32_t mboxes;
9a853f71 2299 uint16_t __iomem *wptr;
e315cd28 2300 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
2301 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2302
4fa94f83
AV
2303 /* Read all mbox registers? */
2304 mboxes = (1 << ha->mbx_count) - 1;
2305 if (!ha->mcp)
2306 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERRROR.\n");
2307 else
2308 mboxes = ha->mcp->in_mb;
2309
9a853f71
AV
2310 /* Load return mailbox registers. */
2311 ha->flags.mbox_int = 1;
2312 ha->mailbox_out[0] = mb0;
4fa94f83 2313 mboxes >>= 1;
9a853f71
AV
2314 wptr = (uint16_t __iomem *)&reg->mailbox1;
2315
2316 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
4fa94f83
AV
2317 if (mboxes & BIT_0)
2318 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2319
2320 mboxes >>= 1;
9a853f71
AV
2321 wptr++;
2322 }
9a853f71
AV
2323}
2324
2325/**
2326 * qla24xx_process_response_queue() - Process response queue entries.
2327 * @ha: SCSI driver HA context
2328 */
2afa19a9
AC
2329void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2330 struct rsp_que *rsp)
9a853f71 2331{
9a853f71 2332 struct sts_entry_24xx *pkt;
a9083016 2333 struct qla_hw_data *ha = vha->hw;
9a853f71 2334
e315cd28 2335 if (!vha->flags.online)
9a853f71
AV
2336 return;
2337
e315cd28
AC
2338 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2339 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2340
e315cd28
AC
2341 rsp->ring_index++;
2342 if (rsp->ring_index == rsp->length) {
2343 rsp->ring_index = 0;
2344 rsp->ring_ptr = rsp->ring;
9a853f71 2345 } else {
e315cd28 2346 rsp->ring_ptr++;
9a853f71
AV
2347 }
2348
2349 if (pkt->entry_status != 0) {
73208dfd 2350 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2d70c103
NB
2351
2352 (void)qlt_24xx_process_response_error(vha, pkt);
2353
9a853f71
AV
2354 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2355 wmb();
2356 continue;
2357 }
2358
2359 switch (pkt->entry_type) {
2360 case STATUS_TYPE:
73208dfd 2361 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2362 break;
2363 case STATUS_CONT_TYPE:
2afa19a9 2364 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2365 break;
2c3dfe3f 2366 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2367 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2368 (struct vp_rpt_id_entry_24xx *)pkt);
2369 break;
ac280b67
AV
2370 case LOGINOUT_PORT_IOCB_TYPE:
2371 qla24xx_logio_entry(vha, rsp->req,
2372 (struct logio_entry_24xx *)pkt);
2373 break;
3822263e
MI
2374 case TSK_MGMT_IOCB_TYPE:
2375 qla24xx_tm_iocb_entry(vha, rsp->req,
2376 (struct tsk_mgmt_entry *)pkt);
2377 break;
9a069e19
GM
2378 case CT_IOCB_TYPE:
2379 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
9a069e19
GM
2380 break;
2381 case ELS_IOCB_TYPE:
2382 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2383 break;
2d70c103
NB
2384 case ABTS_RECV_24XX:
2385 /* ensure that the ATIO queue is empty */
2386 qlt_24xx_process_atio_queue(vha);
2387 case ABTS_RESP_24XX:
2388 case CTIO_TYPE7:
2389 case NOTIFY_ACK_TYPE:
2390 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2391 break;
54883291
SK
2392 case MARKER_TYPE:
2393 /* Do nothing in this case, this check is to prevent it
2394 * from falling into default case
2395 */
2396 break;
9a853f71
AV
2397 default:
2398 /* Type Not Supported. */
7c3df132
SK
2399 ql_dbg(ql_dbg_async, vha, 0x5042,
2400 "Received unknown response pkt type %x "
9a853f71 2401 "entry status=%x.\n",
7c3df132 2402 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2403 break;
2404 }
2405 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2406 wmb();
2407 }
2408
2409 /* Adjust ring index */
a9083016
GM
2410 if (IS_QLA82XX(ha)) {
2411 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2412 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2413 } else
2414 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2415}
2416
05236a05 2417static void
e315cd28 2418qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2419{
2420 int rval;
2421 uint32_t cnt;
e315cd28 2422 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2423 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2424
6246b8a1 2425 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
05236a05
AV
2426 return;
2427
2428 rval = QLA_SUCCESS;
2429 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2430 RD_REG_DWORD(&reg->iobase_addr);
2431 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2432 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2433 rval == QLA_SUCCESS; cnt--) {
2434 if (cnt) {
2435 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2436 udelay(10);
2437 } else
2438 rval = QLA_FUNCTION_TIMEOUT;
2439 }
2440 if (rval == QLA_SUCCESS)
2441 goto next_test;
2442
2443 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2444 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2445 rval == QLA_SUCCESS; cnt--) {
2446 if (cnt) {
2447 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2448 udelay(10);
2449 } else
2450 rval = QLA_FUNCTION_TIMEOUT;
2451 }
2452 if (rval != QLA_SUCCESS)
2453 goto done;
2454
2455next_test:
2456 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2457 ql_log(ql_log_info, vha, 0x504c,
2458 "Additional code -- 0x55AA.\n");
05236a05
AV
2459
2460done:
2461 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2462 RD_REG_DWORD(&reg->iobase_window);
2463}
2464
9a853f71 2465/**
6246b8a1 2466 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
9a853f71
AV
2467 * @irq:
2468 * @dev_id: SCSI driver HA context
9a853f71
AV
2469 *
2470 * Called by system whenever the host adapter generates an interrupt.
2471 *
2472 * Returns handled flag.
2473 */
2474irqreturn_t
7d12e780 2475qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2476{
e315cd28
AC
2477 scsi_qla_host_t *vha;
2478 struct qla_hw_data *ha;
9a853f71
AV
2479 struct device_reg_24xx __iomem *reg;
2480 int status;
9a853f71
AV
2481 unsigned long iter;
2482 uint32_t stat;
2483 uint32_t hccr;
7d613ac6 2484 uint16_t mb[8];
e315cd28 2485 struct rsp_que *rsp;
43fac4d9 2486 unsigned long flags;
9a853f71 2487
e315cd28
AC
2488 rsp = (struct rsp_que *) dev_id;
2489 if (!rsp) {
3256b435
CD
2490 ql_log(ql_log_info, NULL, 0x5059,
2491 "%s: NULL response queue pointer.\n", __func__);
9a853f71
AV
2492 return IRQ_NONE;
2493 }
2494
e315cd28 2495 ha = rsp->hw;
9a853f71
AV
2496 reg = &ha->iobase->isp24;
2497 status = 0;
2498
85880801
AV
2499 if (unlikely(pci_channel_offline(ha->pdev)))
2500 return IRQ_HANDLED;
2501
43fac4d9 2502 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2503 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2504 for (iter = 50; iter--; ) {
2505 stat = RD_REG_DWORD(&reg->host_status);
2506 if (stat & HSRX_RISC_PAUSED) {
85880801 2507 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2508 break;
2509
9a853f71
AV
2510 hccr = RD_REG_DWORD(&reg->hccr);
2511
7c3df132
SK
2512 ql_log(ql_log_warn, vha, 0x504b,
2513 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2514 hccr);
05236a05 2515
e315cd28 2516 qla2xxx_check_risc_status(vha);
05236a05 2517
e315cd28
AC
2518 ha->isp_ops->fw_dump(vha, 1);
2519 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2520 break;
2521 } else if ((stat & HSRX_RISC_INT) == 0)
2522 break;
2523
2524 switch (stat & 0xff) {
2525 case 0x1:
2526 case 0x2:
2527 case 0x10:
2528 case 0x11:
e315cd28 2529 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2530 status |= MBX_INTERRUPT;
2531
2532 break;
2533 case 0x12:
2534 mb[0] = MSW(stat);
2535 mb[1] = RD_REG_WORD(&reg->mailbox1);
2536 mb[2] = RD_REG_WORD(&reg->mailbox2);
2537 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2538 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
2539 break;
2540 case 0x13:
73208dfd 2541 case 0x14:
2afa19a9 2542 qla24xx_process_response_queue(vha, rsp);
9a853f71 2543 break;
2d70c103
NB
2544 case 0x1C: /* ATIO queue updated */
2545 qlt_24xx_process_atio_queue(vha);
2546 break;
2547 case 0x1D: /* ATIO and response queues updated */
2548 qlt_24xx_process_atio_queue(vha);
2549 qla24xx_process_response_queue(vha, rsp);
2550 break;
9a853f71 2551 default:
7c3df132
SK
2552 ql_dbg(ql_dbg_async, vha, 0x504f,
2553 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2554 break;
2555 }
2556 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2557 RD_REG_DWORD_RELAXED(&reg->hccr);
2558 }
43fac4d9 2559 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71
AV
2560
2561 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2562 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 2563 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2564 complete(&ha->mbx_intr_comp);
9a853f71
AV
2565 }
2566
2567 return IRQ_HANDLED;
2568}
2569
a8488abe
AV
2570static irqreturn_t
2571qla24xx_msix_rsp_q(int irq, void *dev_id)
2572{
e315cd28
AC
2573 struct qla_hw_data *ha;
2574 struct rsp_que *rsp;
a8488abe 2575 struct device_reg_24xx __iomem *reg;
2afa19a9 2576 struct scsi_qla_host *vha;
0f19bc68 2577 unsigned long flags;
a8488abe 2578
e315cd28
AC
2579 rsp = (struct rsp_que *) dev_id;
2580 if (!rsp) {
3256b435
CD
2581 ql_log(ql_log_info, NULL, 0x505a,
2582 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2583 return IRQ_NONE;
2584 }
2585 ha = rsp->hw;
a8488abe
AV
2586 reg = &ha->iobase->isp24;
2587
0f19bc68 2588 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2589
a67093d4 2590 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2591 qla24xx_process_response_queue(vha, rsp);
3155754a 2592 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2593 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2594 RD_REG_DWORD_RELAXED(&reg->hccr);
2595 }
0f19bc68 2596 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2597
2598 return IRQ_HANDLED;
2599}
2600
68ca949c
AC
2601static irqreturn_t
2602qla25xx_msix_rsp_q(int irq, void *dev_id)
2603{
2604 struct qla_hw_data *ha;
2605 struct rsp_que *rsp;
3155754a 2606 struct device_reg_24xx __iomem *reg;
0f19bc68 2607 unsigned long flags;
68ca949c
AC
2608
2609 rsp = (struct rsp_que *) dev_id;
2610 if (!rsp) {
3256b435
CD
2611 ql_log(ql_log_info, NULL, 0x505b,
2612 "%s: NULL response queue pointer.\n", __func__);
68ca949c
AC
2613 return IRQ_NONE;
2614 }
2615 ha = rsp->hw;
2616
3155754a 2617 /* Clear the interrupt, if enabled, for this response queue */
d424754c 2618 if (!ha->flags.disable_msix_handshake) {
3155754a 2619 reg = &ha->iobase->isp24;
0f19bc68 2620 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2621 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2622 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2624 }
68ca949c
AC
2625 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2626
2627 return IRQ_HANDLED;
2628}
2629
a8488abe
AV
2630static irqreturn_t
2631qla24xx_msix_default(int irq, void *dev_id)
2632{
e315cd28
AC
2633 scsi_qla_host_t *vha;
2634 struct qla_hw_data *ha;
2635 struct rsp_que *rsp;
a8488abe
AV
2636 struct device_reg_24xx __iomem *reg;
2637 int status;
a8488abe
AV
2638 uint32_t stat;
2639 uint32_t hccr;
7d613ac6 2640 uint16_t mb[8];
0f19bc68 2641 unsigned long flags;
a8488abe 2642
e315cd28
AC
2643 rsp = (struct rsp_que *) dev_id;
2644 if (!rsp) {
3256b435
CD
2645 ql_log(ql_log_info, NULL, 0x505c,
2646 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2647 return IRQ_NONE;
2648 }
2649 ha = rsp->hw;
a8488abe
AV
2650 reg = &ha->iobase->isp24;
2651 status = 0;
2652
0f19bc68 2653 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2654 vha = pci_get_drvdata(ha->pdev);
87f27015 2655 do {
a8488abe
AV
2656 stat = RD_REG_DWORD(&reg->host_status);
2657 if (stat & HSRX_RISC_PAUSED) {
85880801 2658 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2659 break;
2660
a8488abe
AV
2661 hccr = RD_REG_DWORD(&reg->hccr);
2662
7c3df132
SK
2663 ql_log(ql_log_info, vha, 0x5050,
2664 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2665 hccr);
05236a05 2666
e315cd28 2667 qla2xxx_check_risc_status(vha);
05236a05 2668
e315cd28
AC
2669 ha->isp_ops->fw_dump(vha, 1);
2670 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2671 break;
2672 } else if ((stat & HSRX_RISC_INT) == 0)
2673 break;
2674
2675 switch (stat & 0xff) {
2676 case 0x1:
2677 case 0x2:
2678 case 0x10:
2679 case 0x11:
e315cd28 2680 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2681 status |= MBX_INTERRUPT;
2682
2683 break;
2684 case 0x12:
2685 mb[0] = MSW(stat);
2686 mb[1] = RD_REG_WORD(&reg->mailbox1);
2687 mb[2] = RD_REG_WORD(&reg->mailbox2);
2688 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2689 qla2x00_async_event(vha, rsp, mb);
a8488abe
AV
2690 break;
2691 case 0x13:
73208dfd 2692 case 0x14:
2afa19a9 2693 qla24xx_process_response_queue(vha, rsp);
a8488abe 2694 break;
2d70c103
NB
2695 case 0x1C: /* ATIO queue updated */
2696 qlt_24xx_process_atio_queue(vha);
2697 break;
2698 case 0x1D: /* ATIO and response queues updated */
2699 qlt_24xx_process_atio_queue(vha);
2700 qla24xx_process_response_queue(vha, rsp);
2701 break;
a8488abe 2702 default:
7c3df132
SK
2703 ql_dbg(ql_dbg_async, vha, 0x5051,
2704 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2705 break;
2706 }
2707 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2708 } while (0);
0f19bc68 2709 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2710
2711 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2712 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 2713 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2714 complete(&ha->mbx_intr_comp);
a8488abe 2715 }
a8488abe
AV
2716 return IRQ_HANDLED;
2717}
2718
2719/* Interrupt handling helpers. */
2720
2721struct qla_init_msix_entry {
a8488abe 2722 const char *name;
476834c2 2723 irq_handler_t handler;
a8488abe
AV
2724};
2725
68ca949c 2726static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2727 { "qla2xxx (default)", qla24xx_msix_default },
2728 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2729 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2730};
2731
a9083016
GM
2732static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2733 { "qla2xxx (default)", qla82xx_msix_default },
2734 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2735};
2736
a8488abe 2737static void
e315cd28 2738qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2739{
2740 int i;
2741 struct qla_msix_entry *qentry;
7c3df132 2742 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2743
73208dfd
AC
2744 for (i = 0; i < ha->msix_count; i++) {
2745 qentry = &ha->msix_entries[i];
a8488abe 2746 if (qentry->have_irq)
73208dfd 2747 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2748 }
2749 pci_disable_msix(ha->pdev);
73208dfd
AC
2750 kfree(ha->msix_entries);
2751 ha->msix_entries = NULL;
2752 ha->flags.msix_enabled = 0;
7c3df132
SK
2753 ql_dbg(ql_dbg_init, vha, 0x0042,
2754 "Disabled the MSI.\n");
a8488abe
AV
2755}
2756
2757static int
73208dfd 2758qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2759{
ad038fa8 2760#define MIN_MSIX_COUNT 2
a8488abe 2761 int i, ret;
73208dfd 2762 struct msix_entry *entries;
a8488abe 2763 struct qla_msix_entry *qentry;
7c3df132 2764 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2765
2766 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2767 GFP_KERNEL);
7c3df132
SK
2768 if (!entries) {
2769 ql_log(ql_log_warn, vha, 0x00bc,
2770 "Failed to allocate memory for msix_entry.\n");
73208dfd 2771 return -ENOMEM;
7c3df132 2772 }
a8488abe 2773
73208dfd
AC
2774 for (i = 0; i < ha->msix_count; i++)
2775 entries[i].entry = i;
a8488abe 2776
73208dfd 2777 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2778 if (ret) {
ad038fa8
LC
2779 if (ret < MIN_MSIX_COUNT)
2780 goto msix_failed;
2781
7c3df132
SK
2782 ql_log(ql_log_warn, vha, 0x00c6,
2783 "MSI-X: Failed to enable support "
2784 "-- %d/%d\n Retry with %d vectors.\n",
2785 ha->msix_count, ret, ret);
73208dfd
AC
2786 ha->msix_count = ret;
2787 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2788 if (ret) {
ad038fa8 2789msix_failed:
7c3df132
SK
2790 ql_log(ql_log_fatal, vha, 0x00c7,
2791 "MSI-X: Failed to enable support, "
2792 "giving up -- %d/%d.\n",
2793 ha->msix_count, ret);
73208dfd
AC
2794 goto msix_out;
2795 }
2afa19a9 2796 ha->max_rsp_queues = ha->msix_count - 1;
73208dfd
AC
2797 }
2798 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2799 ha->msix_count, GFP_KERNEL);
2800 if (!ha->msix_entries) {
7c3df132
SK
2801 ql_log(ql_log_fatal, vha, 0x00c8,
2802 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2803 ret = -ENOMEM;
a8488abe
AV
2804 goto msix_out;
2805 }
2806 ha->flags.msix_enabled = 1;
2807
73208dfd
AC
2808 for (i = 0; i < ha->msix_count; i++) {
2809 qentry = &ha->msix_entries[i];
2810 qentry->vector = entries[i].vector;
2811 qentry->entry = entries[i].entry;
a8488abe 2812 qentry->have_irq = 0;
73208dfd 2813 qentry->rsp = NULL;
a8488abe
AV
2814 }
2815
2afa19a9
AC
2816 /* Enable MSI-X vectors for the base queue */
2817 for (i = 0; i < 2; i++) {
2818 qentry = &ha->msix_entries[i];
a9083016
GM
2819 if (IS_QLA82XX(ha)) {
2820 ret = request_irq(qentry->vector,
2821 qla82xx_msix_entries[i].handler,
2822 0, qla82xx_msix_entries[i].name, rsp);
2823 } else {
2824 ret = request_irq(qentry->vector,
2825 msix_entries[i].handler,
2826 0, msix_entries[i].name, rsp);
2827 }
2afa19a9 2828 if (ret) {
7c3df132
SK
2829 ql_log(ql_log_fatal, vha, 0x00cb,
2830 "MSI-X: unable to register handler -- %x/%d.\n",
2831 qentry->vector, ret);
2afa19a9
AC
2832 qla24xx_disable_msix(ha);
2833 ha->mqenable = 0;
2834 goto msix_out;
2835 }
2836 qentry->have_irq = 1;
2837 qentry->rsp = rsp;
2838 rsp->msix = qentry;
73208dfd 2839 }
73208dfd
AC
2840
2841 /* Enable MSI-X vector for response queue update for queue 0 */
6246b8a1
GM
2842 if (IS_QLA83XX(ha)) {
2843 if (ha->msixbase && ha->mqiobase &&
2844 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2845 ha->mqenable = 1;
2846 } else
2847 if (ha->mqiobase
2848 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2849 ha->mqenable = 1;
7c3df132
SK
2850 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2851 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2852 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2853 ql_dbg(ql_dbg_init, vha, 0x0055,
2854 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2855 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2856
a8488abe 2857msix_out:
73208dfd 2858 kfree(entries);
a8488abe
AV
2859 return ret;
2860}
2861
2862int
73208dfd 2863qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2864{
2865 int ret;
963b0fdd 2866 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2867 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2868
2869 /* If possible, enable MSI-X. */
6246b8a1
GM
2870 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2871 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
6377a7ae
BH
2872 goto skip_msi;
2873
2874 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2875 (ha->pdev->subsystem_device == 0x7040 ||
2876 ha->pdev->subsystem_device == 0x7041 ||
2877 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2878 ql_log(ql_log_warn, vha, 0x0034,
2879 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2880 ha->pdev->subsystem_vendor,
7c3df132 2881 ha->pdev->subsystem_device);
6377a7ae
BH
2882 goto skip_msi;
2883 }
a8488abe 2884
42cd4f5d 2885 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
7c3df132
SK
2886 ql_log(ql_log_warn, vha, 0x0035,
2887 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
42cd4f5d 2888 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
a8488abe
AV
2889 goto skip_msix;
2890 }
2891
73208dfd 2892 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2893 if (!ret) {
7c3df132
SK
2894 ql_dbg(ql_dbg_init, vha, 0x0036,
2895 "MSI-X: Enabled (0x%X, 0x%X).\n",
2896 ha->chip_revision, ha->fw_attributes);
963b0fdd 2897 goto clear_risc_ints;
a8488abe 2898 }
7c3df132
SK
2899 ql_log(ql_log_info, vha, 0x0037,
2900 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2901skip_msix:
cbedb601 2902
3a03eb79
AV
2903 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2904 !IS_QLA8001(ha))
cbedb601
AV
2905 goto skip_msi;
2906
2907 ret = pci_enable_msi(ha->pdev);
2908 if (!ret) {
7c3df132
SK
2909 ql_dbg(ql_dbg_init, vha, 0x0038,
2910 "MSI: Enabled.\n");
cbedb601 2911 ha->flags.msi_enabled = 1;
a9083016 2912 } else
7c3df132
SK
2913 ql_log(ql_log_warn, vha, 0x0039,
2914 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
a033b655
GM
2915
2916 /* Skip INTx on ISP82xx. */
2917 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2918 return QLA_FUNCTION_FAILED;
2919
cbedb601
AV
2920skip_msi:
2921
fd34f556 2922 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2923 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2924 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2925 if (ret) {
7c3df132 2926 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2927 "Failed to reserve interrupt %d already in use.\n",
2928 ha->pdev->irq);
963b0fdd
AV
2929 goto fail;
2930 }
7992abfc 2931
963b0fdd
AV
2932clear_risc_ints:
2933
3a03eb79
AV
2934 /*
2935 * FIXME: Noted that 8014s were being dropped during NK testing.
2936 * Timing deltas during MSI-X/INTa transitions?
2937 */
6246b8a1 2938 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
3a03eb79 2939 goto fail;
c6952483 2940 spin_lock_irq(&ha->hardware_lock);
963b0fdd
AV
2941 if (IS_FWI2_CAPABLE(ha)) {
2942 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2943 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2944 } else {
2945 WRT_REG_WORD(&reg->isp.semaphore, 0);
2946 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2947 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
a8488abe 2948 }
c6952483 2949 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2950
963b0fdd 2951fail:
a8488abe
AV
2952 return ret;
2953}
2954
2955void
e315cd28 2956qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2957{
e315cd28 2958 struct qla_hw_data *ha = vha->hw;
9a347ff4
CD
2959 struct rsp_que *rsp;
2960
2961 /*
2962 * We need to check that ha->rsp_q_map is valid in case we are called
2963 * from a probe failure context.
2964 */
2965 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2966 return;
2967 rsp = ha->rsp_q_map[0];
a8488abe
AV
2968
2969 if (ha->flags.msix_enabled)
2970 qla24xx_disable_msix(ha);
90a86fc0 2971 else if (ha->flags.msi_enabled) {
e315cd28 2972 free_irq(ha->pdev->irq, rsp);
cbedb601 2973 pci_disable_msi(ha->pdev);
90a86fc0
JC
2974 } else
2975 free_irq(ha->pdev->irq, rsp);
a8488abe 2976}
e315cd28 2977
73208dfd
AC
2978
2979int qla25xx_request_irq(struct rsp_que *rsp)
2980{
2981 struct qla_hw_data *ha = rsp->hw;
2afa19a9 2982 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 2983 struct qla_msix_entry *msix = rsp->msix;
7c3df132 2984 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2985 int ret;
2986
2987 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2988 if (ret) {
7c3df132
SK
2989 ql_log(ql_log_fatal, vha, 0x00e6,
2990 "MSI-X: Unable to register handler -- %x/%d.\n",
2991 msix->vector, ret);
73208dfd
AC
2992 return ret;
2993 }
2994 msix->have_irq = 1;
2995 msix->rsp = rsp;
2996 return ret;
2997}