[SCSI] qla2xxx: Don't process RSCNs for a vport on the same physical adapter.
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_isr.c
CommitLineData
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
1e63395c 3 * Copyright (c) 2003-2013 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
2d70c103 8#include "qla_target.h"
1da177e4 9
05236a05 10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
df7baa50 12#include <scsi/scsi_tcq.h>
9a069e19 13#include <scsi/scsi_bsg_fc.h>
bad75002 14#include <scsi/scsi_eh.h>
df7baa50 15
aa230bc5
AE
16#include "qla_target.h"
17
1da177e4 18static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
73208dfd
AC
19static void qla2x00_process_completed_request(struct scsi_qla_host *,
20 struct req_que *, uint32_t);
21static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
2afa19a9 22static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
73208dfd
AC
23static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24 sts_entry_t *);
9a853f71 25
1da177e4
LT
26/**
27 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
28 * @irq:
29 * @dev_id: SCSI driver HA context
1da177e4
LT
30 *
31 * Called by system whenever the host adapter generates an interrupt.
32 *
33 * Returns handled flag.
34 */
35irqreturn_t
7d12e780 36qla2100_intr_handler(int irq, void *dev_id)
1da177e4 37{
e315cd28
AC
38 scsi_qla_host_t *vha;
39 struct qla_hw_data *ha;
3d71644c 40 struct device_reg_2xxx __iomem *reg;
1da177e4 41 int status;
1da177e4 42 unsigned long iter;
14e660e6 43 uint16_t hccr;
9a853f71 44 uint16_t mb[4];
e315cd28 45 struct rsp_que *rsp;
43fac4d9 46 unsigned long flags;
1da177e4 47
e315cd28
AC
48 rsp = (struct rsp_que *) dev_id;
49 if (!rsp) {
3256b435
CD
50 ql_log(ql_log_info, NULL, 0x505d,
51 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
52 return (IRQ_NONE);
53 }
54
e315cd28 55 ha = rsp->hw;
3d71644c 56 reg = &ha->iobase->isp;
1da177e4
LT
57 status = 0;
58
43fac4d9 59 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 60 vha = pci_get_drvdata(ha->pdev);
1da177e4 61 for (iter = 50; iter--; ) {
14e660e6
SJ
62 hccr = RD_REG_WORD(&reg->hccr);
63 if (hccr & HCCR_RISC_PAUSE) {
64 if (pci_channel_offline(ha->pdev))
65 break;
66
67 /*
68 * Issue a "HARD" reset in order for the RISC interrupt
a06a0f8e 69 * bit to be cleared. Schedule a big hammer to get
14e660e6
SJ
70 * out of the RISC PAUSED state.
71 */
72 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
73 RD_REG_WORD(&reg->hccr);
74
e315cd28
AC
75 ha->isp_ops->fw_dump(vha, 1);
76 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
77 break;
78 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
79 break;
80
81 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
82 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
83 RD_REG_WORD(&reg->hccr);
84
85 /* Get mailbox data. */
9a853f71
AV
86 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
87 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 88 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 89 status |= MBX_INTERRUPT;
9a853f71
AV
90 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
91 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
92 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
93 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 94 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
95 } else {
96 /*EMPTY*/
7c3df132
SK
97 ql_dbg(ql_dbg_async, vha, 0x5025,
98 "Unrecognized interrupt type (%d).\n",
99 mb[0]);
1da177e4
LT
100 }
101 /* Release mailbox registers. */
102 WRT_REG_WORD(&reg->semaphore, 0);
103 RD_REG_WORD(&reg->semaphore);
104 } else {
73208dfd 105 qla2x00_process_response_queue(rsp);
1da177e4
LT
106
107 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
108 RD_REG_WORD(&reg->hccr);
109 }
110 }
43fac4d9 111 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 112
1da177e4
LT
113 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
114 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 115 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 116 complete(&ha->mbx_intr_comp);
1da177e4
LT
117 }
118
1da177e4
LT
119 return (IRQ_HANDLED);
120}
121
122/**
123 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
124 * @irq:
125 * @dev_id: SCSI driver HA context
1da177e4
LT
126 *
127 * Called by system whenever the host adapter generates an interrupt.
128 *
129 * Returns handled flag.
130 */
131irqreturn_t
7d12e780 132qla2300_intr_handler(int irq, void *dev_id)
1da177e4 133{
e315cd28 134 scsi_qla_host_t *vha;
3d71644c 135 struct device_reg_2xxx __iomem *reg;
1da177e4 136 int status;
1da177e4
LT
137 unsigned long iter;
138 uint32_t stat;
1da177e4 139 uint16_t hccr;
9a853f71 140 uint16_t mb[4];
e315cd28
AC
141 struct rsp_que *rsp;
142 struct qla_hw_data *ha;
43fac4d9 143 unsigned long flags;
1da177e4 144
e315cd28
AC
145 rsp = (struct rsp_que *) dev_id;
146 if (!rsp) {
3256b435
CD
147 ql_log(ql_log_info, NULL, 0x5058,
148 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
149 return (IRQ_NONE);
150 }
151
e315cd28 152 ha = rsp->hw;
3d71644c 153 reg = &ha->iobase->isp;
1da177e4
LT
154 status = 0;
155
43fac4d9 156 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 157 vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
158 for (iter = 50; iter--; ) {
159 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
160 if (stat & HSR_RISC_PAUSED) {
85880801 161 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
162 break;
163
1da177e4
LT
164 hccr = RD_REG_WORD(&reg->hccr);
165 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
7c3df132
SK
166 ql_log(ql_log_warn, vha, 0x5026,
167 "Parity error -- HCCR=%x, Dumping "
168 "firmware.\n", hccr);
1da177e4 169 else
7c3df132
SK
170 ql_log(ql_log_warn, vha, 0x5027,
171 "RISC paused -- HCCR=%x, Dumping "
172 "firmware.\n", hccr);
1da177e4
LT
173
174 /*
175 * Issue a "HARD" reset in order for the RISC
176 * interrupt bit to be cleared. Schedule a big
a06a0f8e 177 * hammer to get out of the RISC PAUSED state.
1da177e4
LT
178 */
179 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
180 RD_REG_WORD(&reg->hccr);
07f31805 181
e315cd28
AC
182 ha->isp_ops->fw_dump(vha, 1);
183 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
184 break;
185 } else if ((stat & HSR_RISC_INT) == 0)
186 break;
187
1da177e4 188 switch (stat & 0xff) {
1da177e4
LT
189 case 0x1:
190 case 0x2:
191 case 0x10:
192 case 0x11:
e315cd28 193 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
194 status |= MBX_INTERRUPT;
195
196 /* Release mailbox registers. */
197 WRT_REG_WORD(&reg->semaphore, 0);
198 break;
199 case 0x12:
9a853f71
AV
200 mb[0] = MSW(stat);
201 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
202 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
203 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 204 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
205 break;
206 case 0x13:
73208dfd 207 qla2x00_process_response_queue(rsp);
1da177e4
LT
208 break;
209 case 0x15:
9a853f71
AV
210 mb[0] = MBA_CMPLT_1_16BIT;
211 mb[1] = MSW(stat);
73208dfd 212 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
213 break;
214 case 0x16:
9a853f71
AV
215 mb[0] = MBA_SCSI_COMPLETION;
216 mb[1] = MSW(stat);
217 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 218 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
219 break;
220 default:
7c3df132
SK
221 ql_dbg(ql_dbg_async, vha, 0x5028,
222 "Unrecognized interrupt type (%d).\n", stat & 0xff);
1da177e4
LT
223 break;
224 }
225 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
226 RD_REG_WORD_RELAXED(&reg->hccr);
227 }
43fac4d9 228 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 229
1da177e4
LT
230 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
231 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 232 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 233 complete(&ha->mbx_intr_comp);
1da177e4
LT
234 }
235
1da177e4
LT
236 return (IRQ_HANDLED);
237}
238
239/**
240 * qla2x00_mbx_completion() - Process mailbox command completions.
241 * @ha: SCSI driver HA context
242 * @mb0: Mailbox0 register
243 */
244static void
e315cd28 245qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
246{
247 uint16_t cnt;
4fa94f83 248 uint32_t mboxes;
1da177e4 249 uint16_t __iomem *wptr;
e315cd28 250 struct qla_hw_data *ha = vha->hw;
3d71644c 251 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 252
4fa94f83
AV
253 /* Read all mbox registers? */
254 mboxes = (1 << ha->mbx_count) - 1;
255 if (!ha->mcp)
a720101d 256 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
4fa94f83
AV
257 else
258 mboxes = ha->mcp->in_mb;
259
1da177e4
LT
260 /* Load return mailbox registers. */
261 ha->flags.mbox_int = 1;
262 ha->mailbox_out[0] = mb0;
4fa94f83 263 mboxes >>= 1;
1da177e4
LT
264 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
265
266 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 267 if (IS_QLA2200(ha) && cnt == 8)
1da177e4 268 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
4fa94f83 269 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
1da177e4 270 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
4fa94f83 271 else if (mboxes & BIT_0)
1da177e4 272 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 273
1da177e4 274 wptr++;
4fa94f83 275 mboxes >>= 1;
1da177e4 276 }
1da177e4
LT
277}
278
8a659571
AV
279static void
280qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
281{
282 static char *event[] =
283 { "Complete", "Request Notification", "Time Extension" };
284 int rval;
285 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
286 uint16_t __iomem *wptr;
287 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
288
289 /* Seed data -- mailbox1 -> mailbox7. */
290 wptr = (uint16_t __iomem *)&reg24->mailbox1;
291 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
292 mb[cnt] = RD_REG_WORD(wptr);
293
7c3df132 294 ql_dbg(ql_dbg_async, vha, 0x5021,
6246b8a1 295 "Inter-Driver Communication %s -- "
7c3df132
SK
296 "%04x %04x %04x %04x %04x %04x %04x.\n",
297 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
298 mb[4], mb[5], mb[6]);
bf5b8ad7
CD
299 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
300 vha->hw->flags.idc_compl_status = 1;
301 if (vha->hw->notify_dcbx_comp)
302 complete(&vha->hw->dcbx_comp);
303 }
8a659571 304
bf5b8ad7
CD
305 /* Acknowledgement needed? [Notify && non-zero timeout]. */
306 timeout = (descr >> 8) & 0xf;
307 if (aen != MBA_IDC_NOTIFY || !timeout)
308 return;
8fcd6b8b 309
bf5b8ad7
CD
310 ql_dbg(ql_dbg_async, vha, 0x5022,
311 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
312 vha->host_no, event[aen & 0xff], timeout);
8a659571
AV
313
314 rval = qla2x00_post_idc_ack_work(vha, mb);
315 if (rval != QLA_SUCCESS)
7c3df132 316 ql_log(ql_log_warn, vha, 0x5023,
8a659571
AV
317 "IDC failed to post ACK.\n");
318}
319
daae62a3 320#define LS_UNKNOWN 2
d0297c9a
JC
321const char *
322qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
daae62a3 323{
d0297c9a
JC
324 static const char * const link_speeds[] = {
325 "1", "2", "?", "4", "8", "16", "10"
326 };
daae62a3
CD
327
328 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d0297c9a
JC
329 return link_speeds[0];
330 else if (speed == 0x13)
331 return link_speeds[6];
332 else if (speed < 6)
333 return link_speeds[speed];
334 else
335 return link_speeds[LS_UNKNOWN];
daae62a3
CD
336}
337
fa492630 338static void
7d613ac6
SV
339qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
340{
341 struct qla_hw_data *ha = vha->hw;
342
343 /*
344 * 8200 AEN Interpretation:
345 * mb[0] = AEN code
346 * mb[1] = AEN Reason code
347 * mb[2] = LSW of Peg-Halt Status-1 Register
348 * mb[6] = MSW of Peg-Halt Status-1 Register
349 * mb[3] = LSW of Peg-Halt Status-2 register
350 * mb[7] = MSW of Peg-Halt Status-2 register
351 * mb[4] = IDC Device-State Register value
352 * mb[5] = IDC Driver-Presence Register value
353 */
354 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
355 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
356 mb[0], mb[1], mb[2], mb[6]);
357 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
358 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
359 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
360
361 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
362 IDC_HEARTBEAT_FAILURE)) {
363 ha->flags.nic_core_hung = 1;
364 ql_log(ql_log_warn, vha, 0x5060,
365 "83XX: F/W Error Reported: Check if reset required.\n");
366
367 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
368 uint32_t protocol_engine_id, fw_err_code, err_level;
369
370 /*
371 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
372 * - PEG-Halt Status-1 Register:
373 * (LSW = mb[2], MSW = mb[6])
374 * Bits 0-7 = protocol-engine ID
375 * Bits 8-28 = f/w error code
376 * Bits 29-31 = Error-level
377 * Error-level 0x1 = Non-Fatal error
378 * Error-level 0x2 = Recoverable Fatal error
379 * Error-level 0x4 = UnRecoverable Fatal error
380 * - PEG-Halt Status-2 Register:
381 * (LSW = mb[3], MSW = mb[7])
382 */
383 protocol_engine_id = (mb[2] & 0xff);
384 fw_err_code = (((mb[2] & 0xff00) >> 8) |
385 ((mb[6] & 0x1fff) << 8));
386 err_level = ((mb[6] & 0xe000) >> 13);
387 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
388 "Register: protocol_engine_id=0x%x "
389 "fw_err_code=0x%x err_level=0x%x.\n",
390 protocol_engine_id, fw_err_code, err_level);
391 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
392 "Register: 0x%x%x.\n", mb[7], mb[3]);
393 if (err_level == ERR_LEVEL_NON_FATAL) {
394 ql_log(ql_log_warn, vha, 0x5063,
395 "Not a fatal error, f/w has recovered "
396 "iteself.\n");
397 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
398 ql_log(ql_log_fatal, vha, 0x5064,
399 "Recoverable Fatal error: Chip reset "
400 "required.\n");
401 qla83xx_schedule_work(vha,
402 QLA83XX_NIC_CORE_RESET);
403 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
404 ql_log(ql_log_fatal, vha, 0x5065,
405 "Unrecoverable Fatal error: Set FAILED "
406 "state, reboot required.\n");
407 qla83xx_schedule_work(vha,
408 QLA83XX_NIC_CORE_UNRECOVERABLE);
409 }
410 }
411
412 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
413 uint16_t peg_fw_state, nw_interface_link_up;
414 uint16_t nw_interface_signal_detect, sfp_status;
415 uint16_t htbt_counter, htbt_monitor_enable;
416 uint16_t sfp_additonal_info, sfp_multirate;
417 uint16_t sfp_tx_fault, link_speed, dcbx_status;
418
419 /*
420 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
421 * - PEG-to-FC Status Register:
422 * (LSW = mb[2], MSW = mb[6])
423 * Bits 0-7 = Peg-Firmware state
424 * Bit 8 = N/W Interface Link-up
425 * Bit 9 = N/W Interface signal detected
426 * Bits 10-11 = SFP Status
427 * SFP Status 0x0 = SFP+ transceiver not expected
428 * SFP Status 0x1 = SFP+ transceiver not present
429 * SFP Status 0x2 = SFP+ transceiver invalid
430 * SFP Status 0x3 = SFP+ transceiver present and
431 * valid
432 * Bits 12-14 = Heartbeat Counter
433 * Bit 15 = Heartbeat Monitor Enable
434 * Bits 16-17 = SFP Additional Info
435 * SFP info 0x0 = Unregocnized transceiver for
436 * Ethernet
437 * SFP info 0x1 = SFP+ brand validation failed
438 * SFP info 0x2 = SFP+ speed validation failed
439 * SFP info 0x3 = SFP+ access error
440 * Bit 18 = SFP Multirate
441 * Bit 19 = SFP Tx Fault
442 * Bits 20-22 = Link Speed
443 * Bits 23-27 = Reserved
444 * Bits 28-30 = DCBX Status
445 * DCBX Status 0x0 = DCBX Disabled
446 * DCBX Status 0x1 = DCBX Enabled
447 * DCBX Status 0x2 = DCBX Exchange error
448 * Bit 31 = Reserved
449 */
450 peg_fw_state = (mb[2] & 0x00ff);
451 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
452 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
453 sfp_status = ((mb[2] & 0x0c00) >> 10);
454 htbt_counter = ((mb[2] & 0x7000) >> 12);
455 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
456 sfp_additonal_info = (mb[6] & 0x0003);
457 sfp_multirate = ((mb[6] & 0x0004) >> 2);
458 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
459 link_speed = ((mb[6] & 0x0070) >> 4);
460 dcbx_status = ((mb[6] & 0x7000) >> 12);
461
462 ql_log(ql_log_warn, vha, 0x5066,
463 "Peg-to-Fc Status Register:\n"
464 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
465 "nw_interface_signal_detect=0x%x"
466 "\nsfp_statis=0x%x.\n ", peg_fw_state,
467 nw_interface_link_up, nw_interface_signal_detect,
468 sfp_status);
469 ql_log(ql_log_warn, vha, 0x5067,
470 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
471 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
472 htbt_counter, htbt_monitor_enable,
473 sfp_additonal_info, sfp_multirate);
474 ql_log(ql_log_warn, vha, 0x5068,
475 "sfp_tx_fault=0x%x, link_state=0x%x, "
476 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
477 dcbx_status);
478
479 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
480 }
481
482 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
483 ql_log(ql_log_warn, vha, 0x5069,
484 "Heartbeat Failure encountered, chip reset "
485 "required.\n");
486
487 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
488 }
489 }
490
491 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
492 ql_log(ql_log_info, vha, 0x506a,
493 "IDC Device-State changed = 0x%x.\n", mb[4]);
494 qla83xx_schedule_work(vha, MBA_IDC_AEN);
495 }
496}
497
bb4cf5b7
CD
498int
499qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
500{
501 struct qla_hw_data *ha = vha->hw;
502 scsi_qla_host_t *vp;
503 uint32_t vp_did;
504 unsigned long flags;
505 int ret = 0;
506
507 if (!ha->num_vhosts)
508 return ret;
509
510 spin_lock_irqsave(&ha->vport_slock, flags);
511 list_for_each_entry(vp, &ha->vp_list, list) {
512 vp_did = vp->d_id.b24;
513 if (vp_did == rscn_entry) {
514 ret = 1;
515 break;
516 }
517 }
518 spin_unlock_irqrestore(&ha->vport_slock, flags);
519
520 return ret;
521}
522
1da177e4
LT
523/**
524 * qla2x00_async_event() - Process aynchronous events.
525 * @ha: SCSI driver HA context
9a853f71 526 * @mb: Mailbox registers (0 - 3)
1da177e4 527 */
2c3dfe3f 528void
73208dfd 529qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 530{
1da177e4 531 uint16_t handle_cnt;
bdab23da 532 uint16_t cnt, mbx;
1da177e4 533 uint32_t handles[5];
e315cd28 534 struct qla_hw_data *ha = vha->hw;
3d71644c 535 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
bdab23da 536 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
bc5c2aad 537 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1da177e4 538 uint32_t rscn_entry, host_pid;
4d4df193 539 unsigned long flags;
1da177e4
LT
540
541 /* Setup to process RIO completion. */
542 handle_cnt = 0;
6246b8a1 543 if (IS_CNA_CAPABLE(ha))
3a03eb79 544 goto skip_rio;
1da177e4
LT
545 switch (mb[0]) {
546 case MBA_SCSI_COMPLETION:
9a853f71 547 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
548 handle_cnt = 1;
549 break;
550 case MBA_CMPLT_1_16BIT:
9a853f71 551 handles[0] = mb[1];
1da177e4
LT
552 handle_cnt = 1;
553 mb[0] = MBA_SCSI_COMPLETION;
554 break;
555 case MBA_CMPLT_2_16BIT:
9a853f71
AV
556 handles[0] = mb[1];
557 handles[1] = mb[2];
1da177e4
LT
558 handle_cnt = 2;
559 mb[0] = MBA_SCSI_COMPLETION;
560 break;
561 case MBA_CMPLT_3_16BIT:
9a853f71
AV
562 handles[0] = mb[1];
563 handles[1] = mb[2];
564 handles[2] = mb[3];
1da177e4
LT
565 handle_cnt = 3;
566 mb[0] = MBA_SCSI_COMPLETION;
567 break;
568 case MBA_CMPLT_4_16BIT:
9a853f71
AV
569 handles[0] = mb[1];
570 handles[1] = mb[2];
571 handles[2] = mb[3];
1da177e4
LT
572 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
573 handle_cnt = 4;
574 mb[0] = MBA_SCSI_COMPLETION;
575 break;
576 case MBA_CMPLT_5_16BIT:
9a853f71
AV
577 handles[0] = mb[1];
578 handles[1] = mb[2];
579 handles[2] = mb[3];
1da177e4
LT
580 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
581 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
582 handle_cnt = 5;
583 mb[0] = MBA_SCSI_COMPLETION;
584 break;
585 case MBA_CMPLT_2_32BIT:
9a853f71 586 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
587 handles[1] = le32_to_cpu(
588 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
589 RD_MAILBOX_REG(ha, reg, 6));
590 handle_cnt = 2;
591 mb[0] = MBA_SCSI_COMPLETION;
592 break;
593 default:
594 break;
595 }
3a03eb79 596skip_rio:
1da177e4
LT
597 switch (mb[0]) {
598 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 599 if (!vha->flags.online)
1da177e4
LT
600 break;
601
602 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
603 qla2x00_process_completed_request(vha, rsp->req,
604 handles[cnt]);
1da177e4
LT
605 break;
606
607 case MBA_RESET: /* Reset */
7c3df132
SK
608 ql_dbg(ql_dbg_async, vha, 0x5002,
609 "Asynchronous RESET.\n");
1da177e4 610
e315cd28 611 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
612 break;
613
614 case MBA_SYSTEM_ERR: /* System Error */
6246b8a1
GM
615 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
616 RD_REG_WORD(&reg24->mailbox7) : 0;
7c3df132 617 ql_log(ql_log_warn, vha, 0x5003,
bdab23da
AV
618 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
619 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
1da177e4 620
e315cd28 621 ha->isp_ops->fw_dump(vha, 1);
1da177e4 622
e428924c 623 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 624 if (mb[1] == 0 && mb[2] == 0) {
7c3df132 625 ql_log(ql_log_fatal, vha, 0x5004,
9a853f71
AV
626 "Unrecoverable Hardware Error: adapter "
627 "marked OFFLINE!\n");
e315cd28 628 vha->flags.online = 0;
6246b8a1 629 vha->device_flags |= DFLG_DEV_FAILED;
b1d46989 630 } else {
25985edc 631 /* Check to see if MPI timeout occurred */
b1d46989
MI
632 if ((mbx & MBX_3) && (ha->flags.port0))
633 set_bit(MPI_RESET_NEEDED,
634 &vha->dpc_flags);
635
e315cd28 636 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
b1d46989 637 }
9a853f71 638 } else if (mb[1] == 0) {
7c3df132 639 ql_log(ql_log_fatal, vha, 0x5005,
1da177e4
LT
640 "Unrecoverable Hardware Error: adapter marked "
641 "OFFLINE!\n");
e315cd28 642 vha->flags.online = 0;
6246b8a1 643 vha->device_flags |= DFLG_DEV_FAILED;
1da177e4 644 } else
e315cd28 645 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
646 break;
647
648 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7c3df132
SK
649 ql_log(ql_log_warn, vha, 0x5006,
650 "ISP Request Transfer Error (%x).\n", mb[1]);
1da177e4 651
e315cd28 652 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
653 break;
654
655 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7c3df132
SK
656 ql_log(ql_log_warn, vha, 0x5007,
657 "ISP Response Transfer Error.\n");
1da177e4 658
e315cd28 659 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
660 break;
661
662 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
7c3df132
SK
663 ql_dbg(ql_dbg_async, vha, 0x5008,
664 "Asynchronous WAKEUP_THRES.\n");
1da177e4 665
2d70c103 666 break;
1da177e4 667 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
cfb0919c 668 ql_dbg(ql_dbg_async, vha, 0x5009,
7c3df132 669 "LIP occurred (%x).\n", mb[1]);
1da177e4 670
e315cd28
AC
671 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
672 atomic_set(&vha->loop_state, LOOP_DOWN);
673 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
674 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
675 }
676
e315cd28
AC
677 if (vha->vp_idx) {
678 atomic_set(&vha->vp_state, VP_FAILED);
679 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
680 }
681
e315cd28
AC
682 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
683 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 684
e315cd28
AC
685 vha->flags.management_server_logged_in = 0;
686 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
687 break;
688
689 case MBA_LOOP_UP: /* Loop Up Event */
daae62a3 690 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d8b45213 691 ha->link_data_rate = PORT_SPEED_1GB;
daae62a3 692 else
1da177e4 693 ha->link_data_rate = mb[1];
1da177e4 694
cfb0919c 695 ql_dbg(ql_dbg_async, vha, 0x500a,
daae62a3 696 "LOOP UP detected (%s Gbps).\n",
d0297c9a 697 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1da177e4 698
e315cd28
AC
699 vha->flags.management_server_logged_in = 0;
700 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
701 break;
702
703 case MBA_LOOP_DOWN: /* Loop Down Event */
6246b8a1
GM
704 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
705 ? RD_REG_WORD(&reg24->mailbox4) : 0;
bc5c2aad 706 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
cfb0919c 707 ql_dbg(ql_dbg_async, vha, 0x500b,
7c3df132
SK
708 "LOOP DOWN detected (%x %x %x %x).\n",
709 mb[1], mb[2], mb[3], mbx);
1da177e4 710
e315cd28
AC
711 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
712 atomic_set(&vha->loop_state, LOOP_DOWN);
713 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
714 vha->device_flags |= DFLG_NO_CABLE;
715 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
716 }
717
e315cd28
AC
718 if (vha->vp_idx) {
719 atomic_set(&vha->vp_state, VP_FAILED);
720 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
721 }
722
e315cd28 723 vha->flags.management_server_logged_in = 0;
d8b45213 724 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 725 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
726 break;
727
728 case MBA_LIP_RESET: /* LIP reset occurred */
cfb0919c 729 ql_dbg(ql_dbg_async, vha, 0x500c,
cc3ef7bc 730 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 731
e315cd28
AC
732 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
733 atomic_set(&vha->loop_state, LOOP_DOWN);
734 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
735 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
736 }
737
e315cd28
AC
738 if (vha->vp_idx) {
739 atomic_set(&vha->vp_state, VP_FAILED);
740 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
741 }
742
e315cd28 743 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
744
745 ha->operating_mode = LOOP;
e315cd28
AC
746 vha->flags.management_server_logged_in = 0;
747 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
748 break;
749
3a03eb79 750 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
751 case MBA_POINT_TO_POINT: /* Point-to-Point */
752 if (IS_QLA2100(ha))
753 break;
754
6246b8a1 755 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
7c3df132
SK
756 ql_dbg(ql_dbg_async, vha, 0x500d,
757 "DCBX Completed -- %04x %04x %04x.\n",
758 mb[1], mb[2], mb[3]);
23f2ebd1
SR
759 if (ha->notify_dcbx_comp)
760 complete(&ha->dcbx_comp);
761
762 } else
7c3df132
SK
763 ql_dbg(ql_dbg_async, vha, 0x500e,
764 "Asynchronous P2P MODE received.\n");
1da177e4
LT
765
766 /*
767 * Until there's a transition from loop down to loop up, treat
768 * this as loop down only.
769 */
e315cd28
AC
770 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
771 atomic_set(&vha->loop_state, LOOP_DOWN);
772 if (!atomic_read(&vha->loop_down_timer))
773 atomic_set(&vha->loop_down_timer,
1da177e4 774 LOOP_DOWN_TIME);
e315cd28 775 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
776 }
777
e315cd28
AC
778 if (vha->vp_idx) {
779 atomic_set(&vha->vp_state, VP_FAILED);
780 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
781 }
782
e315cd28
AC
783 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
784 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
785
786 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
787 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
788
789 ha->flags.gpsc_supported = 1;
e315cd28 790 vha->flags.management_server_logged_in = 0;
1da177e4
LT
791 break;
792
793 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
794 if (IS_QLA2100(ha))
795 break;
796
cfb0919c 797 ql_dbg(ql_dbg_async, vha, 0x500f,
1da177e4
LT
798 "Configuration change detected: value=%x.\n", mb[1]);
799
e315cd28
AC
800 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
801 atomic_set(&vha->loop_state, LOOP_DOWN);
802 if (!atomic_read(&vha->loop_down_timer))
803 atomic_set(&vha->loop_down_timer,
1da177e4 804 LOOP_DOWN_TIME);
e315cd28 805 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
806 }
807
e315cd28
AC
808 if (vha->vp_idx) {
809 atomic_set(&vha->vp_state, VP_FAILED);
810 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
811 }
812
e315cd28
AC
813 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
814 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
815 break;
816
817 case MBA_PORT_UPDATE: /* Port database update */
55903b9d
SV
818 /*
819 * Handle only global and vn-port update events
820 *
821 * Relevant inputs:
822 * mb[1] = N_Port handle of changed port
823 * OR 0xffff for global event
824 * mb[2] = New login state
825 * 7 = Port logged out
826 * mb[3] = LSB is vp_idx, 0xff = all vps
827 *
828 * Skip processing if:
829 * Event is global, vp_idx is NOT all vps,
830 * vp_idx does not match
831 * Event is not global, vp_idx does not match
832 */
12cec63e
AV
833 if (IS_QLA2XXX_MIDTYPE(ha) &&
834 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
835 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
836 break;
73208dfd 837
9764ff88
AV
838 /* Global event -- port logout or port unavailable. */
839 if (mb[1] == 0xffff && mb[2] == 0x7) {
7c3df132
SK
840 ql_dbg(ql_dbg_async, vha, 0x5010,
841 "Port unavailable %04x %04x %04x.\n",
842 mb[1], mb[2], mb[3]);
daae62a3
CD
843 ql_log(ql_log_warn, vha, 0x505e,
844 "Link is offline.\n");
9764ff88
AV
845
846 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
847 atomic_set(&vha->loop_state, LOOP_DOWN);
848 atomic_set(&vha->loop_down_timer,
849 LOOP_DOWN_TIME);
850 vha->device_flags |= DFLG_NO_CABLE;
851 qla2x00_mark_all_devices_lost(vha, 1);
852 }
853
854 if (vha->vp_idx) {
855 atomic_set(&vha->vp_state, VP_FAILED);
856 fc_vport_set_state(vha->fc_vport,
857 FC_VPORT_FAILED);
faadc5e7 858 qla2x00_mark_all_devices_lost(vha, 1);
9764ff88
AV
859 }
860
861 vha->flags.management_server_logged_in = 0;
862 ha->link_data_rate = PORT_SPEED_UNKNOWN;
863 break;
864 }
865
1da177e4 866 /*
cc3ef7bc 867 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1da177e4
LT
868 * event etc. earlier indicating loop is down) then process
869 * it. Otherwise ignore it and Wait for RSCN to come in.
870 */
e315cd28 871 atomic_set(&vha->loop_down_timer, 0);
79cc785f 872 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
7c3df132
SK
873 ql_dbg(ql_dbg_async, vha, 0x5011,
874 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
875 mb[1], mb[2], mb[3]);
2d70c103
NB
876
877 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
878 break;
879 }
880
7c3df132
SK
881 ql_dbg(ql_dbg_async, vha, 0x5012,
882 "Port database changed %04x %04x %04x.\n",
883 mb[1], mb[2], mb[3]);
daae62a3
CD
884 ql_log(ql_log_warn, vha, 0x505f,
885 "Link is operational (%s Gbps).\n",
d0297c9a 886 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1da177e4
LT
887
888 /*
889 * Mark all devices as missing so we will login again.
890 */
e315cd28 891 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 892
e315cd28 893 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 894
2d70c103
NB
895 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
896 set_bit(SCR_PENDING, &vha->dpc_flags);
897
e315cd28
AC
898 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
899 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2d70c103
NB
900
901 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
902 break;
903
904 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 905 /* Check if the Vport has issued a SCR */
e315cd28 906 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
907 break;
908 /* Only handle SCNs for our Vport index. */
0d6e61bc 909 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
3c397400 910 break;
0d6e61bc 911
7c3df132
SK
912 ql_dbg(ql_dbg_async, vha, 0x5013,
913 "RSCN database changed -- %04x %04x %04x.\n",
914 mb[1], mb[2], mb[3]);
1da177e4 915
59d72d87 916 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
917 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
918 | vha->d_id.b.al_pa;
1da177e4 919 if (rscn_entry == host_pid) {
7c3df132
SK
920 ql_dbg(ql_dbg_async, vha, 0x5014,
921 "Ignoring RSCN update to local host "
922 "port ID (%06x).\n", host_pid);
1da177e4
LT
923 break;
924 }
925
59d72d87
RA
926 /* Ignore reserved bits from RSCN-payload. */
927 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1da177e4 928
bb4cf5b7
CD
929 /* Skip RSCNs for virtual ports on the same physical port */
930 if (qla2x00_is_a_vp_did(vha, rscn_entry))
931 break;
932
e315cd28
AC
933 atomic_set(&vha->loop_down_timer, 0);
934 vha->flags.management_server_logged_in = 0;
1da177e4 935
e315cd28
AC
936 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
937 set_bit(RSCN_UPDATE, &vha->dpc_flags);
938 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
939 break;
940
941 /* case MBA_RIO_RESPONSE: */
942 case MBA_ZIO_RESPONSE:
7c3df132
SK
943 ql_dbg(ql_dbg_async, vha, 0x5015,
944 "[R|Z]IO update completion.\n");
1da177e4 945
e428924c 946 if (IS_FWI2_CAPABLE(ha))
2afa19a9 947 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 948 else
73208dfd 949 qla2x00_process_response_queue(rsp);
1da177e4 950 break;
9a853f71
AV
951
952 case MBA_DISCARD_RND_FRAME:
7c3df132
SK
953 ql_dbg(ql_dbg_async, vha, 0x5016,
954 "Discard RND Frame -- %04x %04x %04x.\n",
955 mb[1], mb[2], mb[3]);
9a853f71 956 break;
45ebeb56
AV
957
958 case MBA_TRACE_NOTIFICATION:
7c3df132
SK
959 ql_dbg(ql_dbg_async, vha, 0x5017,
960 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
45ebeb56 961 break;
4d4df193
HK
962
963 case MBA_ISP84XX_ALERT:
7c3df132
SK
964 ql_dbg(ql_dbg_async, vha, 0x5018,
965 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
966 mb[1], mb[2], mb[3]);
4d4df193
HK
967
968 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
969 switch (mb[1]) {
970 case A84_PANIC_RECOVERY:
7c3df132
SK
971 ql_log(ql_log_info, vha, 0x5019,
972 "Alert 84XX: panic recovery %04x %04x.\n",
973 mb[2], mb[3]);
4d4df193
HK
974 break;
975 case A84_OP_LOGIN_COMPLETE:
976 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
977 ql_log(ql_log_info, vha, 0x501a,
978 "Alert 84XX: firmware version %x.\n",
979 ha->cs84xx->op_fw_version);
4d4df193
HK
980 break;
981 case A84_DIAG_LOGIN_COMPLETE:
982 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
983 ql_log(ql_log_info, vha, 0x501b,
984 "Alert 84XX: diagnostic firmware version %x.\n",
985 ha->cs84xx->diag_fw_version);
4d4df193
HK
986 break;
987 case A84_GOLD_LOGIN_COMPLETE:
988 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
989 ha->cs84xx->fw_update = 1;
7c3df132
SK
990 ql_log(ql_log_info, vha, 0x501c,
991 "Alert 84XX: gold firmware version %x.\n",
992 ha->cs84xx->gold_fw_version);
4d4df193
HK
993 break;
994 default:
7c3df132
SK
995 ql_log(ql_log_warn, vha, 0x501d,
996 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
4d4df193
HK
997 mb[1], mb[2], mb[3]);
998 }
999 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1000 break;
3a03eb79 1001 case MBA_DCBX_START:
7c3df132
SK
1002 ql_dbg(ql_dbg_async, vha, 0x501e,
1003 "DCBX Started -- %04x %04x %04x.\n",
1004 mb[1], mb[2], mb[3]);
3a03eb79
AV
1005 break;
1006 case MBA_DCBX_PARAM_UPDATE:
7c3df132
SK
1007 ql_dbg(ql_dbg_async, vha, 0x501f,
1008 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1009 mb[1], mb[2], mb[3]);
3a03eb79
AV
1010 break;
1011 case MBA_FCF_CONF_ERR:
7c3df132
SK
1012 ql_dbg(ql_dbg_async, vha, 0x5020,
1013 "FCF Configuration Error -- %04x %04x %04x.\n",
1014 mb[1], mb[2], mb[3]);
3a03eb79 1015 break;
3a03eb79 1016 case MBA_IDC_NOTIFY:
67b2a31f
CD
1017 if (IS_QLA8031(vha->hw)) {
1018 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1019 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1020 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1021 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
8fcd6b8b 1022 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
67b2a31f
CD
1023 /*
1024 * Extend loop down timer since port is active.
1025 */
1026 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1027 atomic_set(&vha->loop_down_timer,
1028 LOOP_DOWN_TIME);
8fcd6b8b
CD
1029 qla2xxx_wake_dpc(vha);
1030 }
67b2a31f 1031 }
8fcd6b8b 1032 case MBA_IDC_COMPLETE:
3a03eb79 1033 case MBA_IDC_TIME_EXT:
bf5b8ad7 1034 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
7d613ac6
SV
1035 qla81xx_idc_event(vha, mb[0], mb[1]);
1036 break;
1037
1038 case MBA_IDC_AEN:
1039 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1040 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1041 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1042 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1043 qla83xx_handle_8200_aen(vha, mb);
3a03eb79 1044 break;
7d613ac6 1045
6246b8a1
GM
1046 default:
1047 ql_dbg(ql_dbg_async, vha, 0x5057,
1048 "Unknown AEN:%04x %04x %04x %04x\n",
1049 mb[0], mb[1], mb[2], mb[3]);
1da177e4 1050 }
2c3dfe3f 1051
2d70c103
NB
1052 qlt_async_event(mb[0], vha, mb);
1053
e315cd28 1054 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 1055 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
1056}
1057
1058/**
1059 * qla2x00_process_completed_request() - Process a Fast Post response.
1060 * @ha: SCSI driver HA context
1061 * @index: SRB index
1062 */
1063static void
73208dfd
AC
1064qla2x00_process_completed_request(struct scsi_qla_host *vha,
1065 struct req_que *req, uint32_t index)
1da177e4
LT
1066{
1067 srb_t *sp;
e315cd28 1068 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1069
1070 /* Validate handle. */
8d93f550 1071 if (index >= req->num_outstanding_cmds) {
7c3df132
SK
1072 ql_log(ql_log_warn, vha, 0x3014,
1073 "Invalid SCSI command index (%x).\n", index);
1da177e4 1074
8f7daead
GM
1075 if (IS_QLA82XX(ha))
1076 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1077 else
1078 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1079 return;
1080 }
1081
e315cd28 1082 sp = req->outstanding_cmds[index];
1da177e4
LT
1083 if (sp) {
1084 /* Free outstanding command slot. */
e315cd28 1085 req->outstanding_cmds[index] = NULL;
1da177e4 1086
1da177e4 1087 /* Save ISP completion status */
9ba56b95 1088 sp->done(ha, sp, DID_OK << 16);
1da177e4 1089 } else {
7c3df132 1090 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1da177e4 1091
8f7daead
GM
1092 if (IS_QLA82XX(ha))
1093 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1094 else
1095 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1096 }
1097}
1098
ac280b67
AV
1099static srb_t *
1100qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1101 struct req_que *req, void *iocb)
1102{
1103 struct qla_hw_data *ha = vha->hw;
1104 sts_entry_t *pkt = iocb;
1105 srb_t *sp = NULL;
1106 uint16_t index;
1107
1108 index = LSW(pkt->handle);
8d93f550 1109 if (index >= req->num_outstanding_cmds) {
7c3df132
SK
1110 ql_log(ql_log_warn, vha, 0x5031,
1111 "Invalid command index (%x).\n", index);
8f7daead
GM
1112 if (IS_QLA82XX(ha))
1113 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1114 else
1115 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ac280b67
AV
1116 goto done;
1117 }
1118 sp = req->outstanding_cmds[index];
1119 if (!sp) {
7c3df132
SK
1120 ql_log(ql_log_warn, vha, 0x5032,
1121 "Invalid completion handle (%x) -- timed-out.\n", index);
ac280b67
AV
1122 return sp;
1123 }
1124 if (sp->handle != index) {
7c3df132
SK
1125 ql_log(ql_log_warn, vha, 0x5033,
1126 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
ac280b67
AV
1127 return NULL;
1128 }
9a069e19 1129
ac280b67 1130 req->outstanding_cmds[index] = NULL;
9a069e19 1131
ac280b67
AV
1132done:
1133 return sp;
1134}
1135
1136static void
1137qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1138 struct mbx_entry *mbx)
1139{
1140 const char func[] = "MBX-IOCB";
1141 const char *type;
ac280b67
AV
1142 fc_port_t *fcport;
1143 srb_t *sp;
4916392b 1144 struct srb_iocb *lio;
99b0bec7 1145 uint16_t *data;
5ff1d584 1146 uint16_t status;
ac280b67
AV
1147
1148 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1149 if (!sp)
1150 return;
1151
9ba56b95
GM
1152 lio = &sp->u.iocb_cmd;
1153 type = sp->name;
ac280b67 1154 fcport = sp->fcport;
4916392b 1155 data = lio->u.logio.data;
ac280b67 1156
5ff1d584 1157 data[0] = MBS_COMMAND_ERROR;
4916392b 1158 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1159 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1160 if (mbx->entry_status) {
7c3df132 1161 ql_dbg(ql_dbg_async, vha, 0x5043,
cfb0919c 1162 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
d3fa9e7d 1163 "entry-status=%x status=%x state-flag=%x "
cfb0919c
CD
1164 "status-flags=%x.\n", type, sp->handle,
1165 fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d
AV
1166 fcport->d_id.b.al_pa, mbx->entry_status,
1167 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
7c3df132 1168 le16_to_cpu(mbx->status_flags));
d3fa9e7d 1169
cfb0919c 1170 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
7c3df132 1171 (uint8_t *)mbx, sizeof(*mbx));
ac280b67 1172
99b0bec7 1173 goto logio_done;
ac280b67
AV
1174 }
1175
5ff1d584 1176 status = le16_to_cpu(mbx->status);
9ba56b95 1177 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
5ff1d584
AV
1178 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1179 status = 0;
1180 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
7c3df132 1181 ql_dbg(ql_dbg_async, vha, 0x5045,
cfb0919c
CD
1182 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1183 type, sp->handle, fcport->d_id.b.domain,
1184 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1185 le16_to_cpu(mbx->mb1));
ac280b67
AV
1186
1187 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1188 if (sp->type == SRB_LOGIN_CMD) {
99b0bec7
AV
1189 fcport->port_type = FCT_TARGET;
1190 if (le16_to_cpu(mbx->mb1) & BIT_0)
1191 fcport->port_type = FCT_INITIATOR;
6ac52608 1192 else if (le16_to_cpu(mbx->mb1) & BIT_1)
99b0bec7 1193 fcport->flags |= FCF_FCP2_DEVICE;
5ff1d584 1194 }
99b0bec7 1195 goto logio_done;
ac280b67
AV
1196 }
1197
1198 data[0] = le16_to_cpu(mbx->mb0);
1199 switch (data[0]) {
1200 case MBS_PORT_ID_USED:
1201 data[1] = le16_to_cpu(mbx->mb1);
1202 break;
1203 case MBS_LOOP_ID_USED:
1204 break;
1205 default:
1206 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1207 break;
1208 }
1209
7c3df132 1210 ql_log(ql_log_warn, vha, 0x5046,
cfb0919c
CD
1211 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1212 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1213 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1214 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
ac280b67 1215 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
7c3df132 1216 le16_to_cpu(mbx->mb7));
ac280b67 1217
99b0bec7 1218logio_done:
9ba56b95 1219 sp->done(vha, sp, 0);
ac280b67
AV
1220}
1221
9bc4f4fb
HZ
1222static void
1223qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1224 sts_entry_t *pkt, int iocb_type)
1225{
1226 const char func[] = "CT_IOCB";
1227 const char *type;
9bc4f4fb 1228 srb_t *sp;
9bc4f4fb
HZ
1229 struct fc_bsg_job *bsg_job;
1230 uint16_t comp_status;
9ba56b95 1231 int res;
9bc4f4fb
HZ
1232
1233 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1234 if (!sp)
1235 return;
1236
9ba56b95 1237 bsg_job = sp->u.bsg_job;
9bc4f4fb 1238
9ba56b95 1239 type = "ct pass-through";
9bc4f4fb
HZ
1240
1241 comp_status = le16_to_cpu(pkt->comp_status);
1242
1243 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1244 * fc payload to the caller
1245 */
1246 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1247 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1248
1249 if (comp_status != CS_COMPLETE) {
1250 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1251 res = DID_OK << 16;
9bc4f4fb
HZ
1252 bsg_job->reply->reply_payload_rcv_len =
1253 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1254
7c3df132
SK
1255 ql_log(ql_log_warn, vha, 0x5048,
1256 "CT pass-through-%s error "
9bc4f4fb 1257 "comp_status-status=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1258 type, comp_status,
1259 bsg_job->reply->reply_payload_rcv_len);
9bc4f4fb 1260 } else {
7c3df132
SK
1261 ql_log(ql_log_warn, vha, 0x5049,
1262 "CT pass-through-%s error "
1263 "comp_status-status=0x%x.\n", type, comp_status);
9ba56b95 1264 res = DID_ERROR << 16;
9bc4f4fb
HZ
1265 bsg_job->reply->reply_payload_rcv_len = 0;
1266 }
cfb0919c 1267 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
7c3df132 1268 (uint8_t *)pkt, sizeof(*pkt));
9bc4f4fb 1269 } else {
9ba56b95 1270 res = DID_OK << 16;
9bc4f4fb
HZ
1271 bsg_job->reply->reply_payload_rcv_len =
1272 bsg_job->reply_payload.payload_len;
1273 bsg_job->reply_len = 0;
1274 }
1275
9ba56b95 1276 sp->done(vha, sp, res);
9bc4f4fb
HZ
1277}
1278
9a069e19
GM
1279static void
1280qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1281 struct sts_entry_24xx *pkt, int iocb_type)
1282{
1283 const char func[] = "ELS_CT_IOCB";
1284 const char *type;
9a069e19 1285 srb_t *sp;
9a069e19
GM
1286 struct fc_bsg_job *bsg_job;
1287 uint16_t comp_status;
1288 uint32_t fw_status[3];
1289 uint8_t* fw_sts_ptr;
9ba56b95 1290 int res;
9a069e19
GM
1291
1292 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1293 if (!sp)
1294 return;
9ba56b95 1295 bsg_job = sp->u.bsg_job;
9a069e19
GM
1296
1297 type = NULL;
9ba56b95 1298 switch (sp->type) {
9a069e19
GM
1299 case SRB_ELS_CMD_RPT:
1300 case SRB_ELS_CMD_HST:
1301 type = "els";
1302 break;
1303 case SRB_CT_CMD:
1304 type = "ct pass-through";
1305 break;
1306 default:
37fed3ee 1307 ql_dbg(ql_dbg_user, vha, 0x503e,
9ba56b95 1308 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
9a069e19
GM
1309 return;
1310 }
1311
1312 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1313 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1314 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1315
1316 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1317 * fc payload to the caller
1318 */
1319 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1320 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1321
1322 if (comp_status != CS_COMPLETE) {
1323 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1324 res = DID_OK << 16;
9a069e19 1325 bsg_job->reply->reply_payload_rcv_len =
9ba56b95 1326 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
9a069e19 1327
37fed3ee 1328 ql_dbg(ql_dbg_user, vha, 0x503f,
cfb0919c 1329 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1330 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
cfb0919c 1331 type, sp->handle, comp_status, fw_status[1], fw_status[2],
7c3df132
SK
1332 le16_to_cpu(((struct els_sts_entry_24xx *)
1333 pkt)->total_byte_count));
9a069e19
GM
1334 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1335 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1336 }
1337 else {
37fed3ee 1338 ql_dbg(ql_dbg_user, vha, 0x5040,
cfb0919c 1339 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1340 "error subcode 1=0x%x error subcode 2=0x%x.\n",
cfb0919c 1341 type, sp->handle, comp_status,
7c3df132
SK
1342 le16_to_cpu(((struct els_sts_entry_24xx *)
1343 pkt)->error_subcode_1),
1344 le16_to_cpu(((struct els_sts_entry_24xx *)
1345 pkt)->error_subcode_2));
9ba56b95 1346 res = DID_ERROR << 16;
9a069e19
GM
1347 bsg_job->reply->reply_payload_rcv_len = 0;
1348 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1349 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1350 }
37fed3ee 1351 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
7c3df132 1352 (uint8_t *)pkt, sizeof(*pkt));
9a069e19
GM
1353 }
1354 else {
9ba56b95 1355 res = DID_OK << 16;
9a069e19
GM
1356 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1357 bsg_job->reply_len = 0;
1358 }
1359
9ba56b95 1360 sp->done(vha, sp, res);
9a069e19
GM
1361}
1362
ac280b67
AV
1363static void
1364qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1365 struct logio_entry_24xx *logio)
1366{
1367 const char func[] = "LOGIO-IOCB";
1368 const char *type;
ac280b67
AV
1369 fc_port_t *fcport;
1370 srb_t *sp;
4916392b 1371 struct srb_iocb *lio;
99b0bec7 1372 uint16_t *data;
ac280b67
AV
1373 uint32_t iop[2];
1374
1375 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1376 if (!sp)
1377 return;
1378
9ba56b95
GM
1379 lio = &sp->u.iocb_cmd;
1380 type = sp->name;
ac280b67 1381 fcport = sp->fcport;
4916392b 1382 data = lio->u.logio.data;
ac280b67 1383
5ff1d584 1384 data[0] = MBS_COMMAND_ERROR;
4916392b 1385 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1386 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1387 if (logio->entry_status) {
5e19ed90 1388 ql_log(ql_log_warn, fcport->vha, 0x5034,
cfb0919c 1389 "Async-%s error entry - hdl=%x"
d3fa9e7d 1390 "portid=%02x%02x%02x entry-status=%x.\n",
cfb0919c
CD
1391 type, sp->handle, fcport->d_id.b.domain,
1392 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1393 logio->entry_status);
1394 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
7c3df132 1395 (uint8_t *)logio, sizeof(*logio));
ac280b67 1396
99b0bec7 1397 goto logio_done;
ac280b67
AV
1398 }
1399
1400 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
5e19ed90 1401 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
cfb0919c
CD
1402 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1403 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1404 fcport->d_id.b.area, fcport->d_id.b.al_pa,
7c3df132 1405 le32_to_cpu(logio->io_parameter[0]));
ac280b67
AV
1406
1407 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1408 if (sp->type != SRB_LOGIN_CMD)
99b0bec7 1409 goto logio_done;
ac280b67
AV
1410
1411 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1412 if (iop[0] & BIT_4) {
1413 fcport->port_type = FCT_TARGET;
1414 if (iop[0] & BIT_8)
8474f3a0 1415 fcport->flags |= FCF_FCP2_DEVICE;
b0cd579c 1416 } else if (iop[0] & BIT_5)
ac280b67 1417 fcport->port_type = FCT_INITIATOR;
b0cd579c 1418
2d70c103
NB
1419 if (iop[0] & BIT_7)
1420 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1421
ac280b67
AV
1422 if (logio->io_parameter[7] || logio->io_parameter[8])
1423 fcport->supported_classes |= FC_COS_CLASS2;
1424 if (logio->io_parameter[9] || logio->io_parameter[10])
1425 fcport->supported_classes |= FC_COS_CLASS3;
1426
99b0bec7 1427 goto logio_done;
ac280b67
AV
1428 }
1429
1430 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1431 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1432 switch (iop[0]) {
1433 case LSC_SCODE_PORTID_USED:
1434 data[0] = MBS_PORT_ID_USED;
1435 data[1] = LSW(iop[1]);
1436 break;
1437 case LSC_SCODE_NPORT_USED:
1438 data[0] = MBS_LOOP_ID_USED;
1439 break;
ac280b67
AV
1440 default:
1441 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1442 break;
1443 }
1444
5e19ed90 1445 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
cfb0919c
CD
1446 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1447 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
d3fa9e7d 1448 fcport->d_id.b.area, fcport->d_id.b.al_pa,
ac280b67
AV
1449 le16_to_cpu(logio->comp_status),
1450 le32_to_cpu(logio->io_parameter[0]),
7c3df132 1451 le32_to_cpu(logio->io_parameter[1]));
ac280b67 1452
99b0bec7 1453logio_done:
9ba56b95 1454 sp->done(vha, sp, 0);
ac280b67
AV
1455}
1456
3822263e
MI
1457static void
1458qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1459 struct tsk_mgmt_entry *tsk)
1460{
1461 const char func[] = "TMF-IOCB";
1462 const char *type;
1463 fc_port_t *fcport;
1464 srb_t *sp;
1465 struct srb_iocb *iocb;
3822263e
MI
1466 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1467 int error = 1;
1468
1469 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1470 if (!sp)
1471 return;
1472
9ba56b95
GM
1473 iocb = &sp->u.iocb_cmd;
1474 type = sp->name;
3822263e
MI
1475 fcport = sp->fcport;
1476
1477 if (sts->entry_status) {
5e19ed90 1478 ql_log(ql_log_warn, fcport->vha, 0x5038,
cfb0919c
CD
1479 "Async-%s error - hdl=%x entry-status(%x).\n",
1480 type, sp->handle, sts->entry_status);
3822263e 1481 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
5e19ed90 1482 ql_log(ql_log_warn, fcport->vha, 0x5039,
cfb0919c
CD
1483 "Async-%s error - hdl=%x completion status(%x).\n",
1484 type, sp->handle, sts->comp_status);
3822263e
MI
1485 } else if (!(le16_to_cpu(sts->scsi_status) &
1486 SS_RESPONSE_INFO_LEN_VALID)) {
5e19ed90 1487 ql_log(ql_log_warn, fcport->vha, 0x503a,
cfb0919c
CD
1488 "Async-%s error - hdl=%x no response info(%x).\n",
1489 type, sp->handle, sts->scsi_status);
3822263e 1490 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
5e19ed90 1491 ql_log(ql_log_warn, fcport->vha, 0x503b,
cfb0919c
CD
1492 "Async-%s error - hdl=%x not enough response(%d).\n",
1493 type, sp->handle, sts->rsp_data_len);
3822263e 1494 } else if (sts->data[3]) {
5e19ed90 1495 ql_log(ql_log_warn, fcport->vha, 0x503c,
cfb0919c
CD
1496 "Async-%s error - hdl=%x response(%x).\n",
1497 type, sp->handle, sts->data[3]);
3822263e
MI
1498 } else {
1499 error = 0;
1500 }
1501
1502 if (error) {
1503 iocb->u.tmf.data = error;
7c3df132
SK
1504 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1505 (uint8_t *)sts, sizeof(*sts));
3822263e
MI
1506 }
1507
9ba56b95 1508 sp->done(vha, sp, 0);
3822263e
MI
1509}
1510
1da177e4
LT
1511/**
1512 * qla2x00_process_response_queue() - Process response queue entries.
1513 * @ha: SCSI driver HA context
1514 */
1515void
73208dfd 1516qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 1517{
73208dfd
AC
1518 struct scsi_qla_host *vha;
1519 struct qla_hw_data *ha = rsp->hw;
3d71644c 1520 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1521 sts_entry_t *pkt;
1522 uint16_t handle_cnt;
1523 uint16_t cnt;
73208dfd 1524
2afa19a9 1525 vha = pci_get_drvdata(ha->pdev);
1da177e4 1526
e315cd28 1527 if (!vha->flags.online)
1da177e4
LT
1528 return;
1529
e315cd28
AC
1530 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1531 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 1532
e315cd28
AC
1533 rsp->ring_index++;
1534 if (rsp->ring_index == rsp->length) {
1535 rsp->ring_index = 0;
1536 rsp->ring_ptr = rsp->ring;
1da177e4 1537 } else {
e315cd28 1538 rsp->ring_ptr++;
1da177e4
LT
1539 }
1540
1541 if (pkt->entry_status != 0) {
73208dfd 1542 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
1543 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1544 wmb();
1545 continue;
1546 }
1547
1548 switch (pkt->entry_type) {
1549 case STATUS_TYPE:
73208dfd 1550 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
1551 break;
1552 case STATUS_TYPE_21:
1553 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1554 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1555 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1556 ((sts21_entry_t *)pkt)->handle[cnt]);
1557 }
1558 break;
1559 case STATUS_TYPE_22:
1560 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1561 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1562 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1563 ((sts22_entry_t *)pkt)->handle[cnt]);
1564 }
1565 break;
1566 case STATUS_CONT_TYPE:
2afa19a9 1567 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1da177e4 1568 break;
ac280b67
AV
1569 case MBX_IOCB_TYPE:
1570 qla2x00_mbx_iocb_entry(vha, rsp->req,
1571 (struct mbx_entry *)pkt);
3822263e 1572 break;
9bc4f4fb
HZ
1573 case CT_IOCB_TYPE:
1574 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1575 break;
1da177e4
LT
1576 default:
1577 /* Type Not Supported. */
7c3df132
SK
1578 ql_log(ql_log_warn, vha, 0x504a,
1579 "Received unknown response pkt type %x "
1da177e4 1580 "entry status=%x.\n",
7c3df132 1581 pkt->entry_type, pkt->entry_status);
1da177e4
LT
1582 break;
1583 }
1584 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1585 wmb();
1586 }
1587
1588 /* Adjust ring index */
e315cd28 1589 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
1590}
1591
4733fcb1 1592static inline void
5544213b 1593qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
9ba56b95 1594 uint32_t sense_len, struct rsp_que *rsp, int res)
4733fcb1 1595{
7c3df132 1596 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95
GM
1597 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1598 uint32_t track_sense_len;
4733fcb1
AV
1599
1600 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1601 sense_len = SCSI_SENSE_BUFFERSIZE;
1602
9ba56b95
GM
1603 SET_CMD_SENSE_LEN(sp, sense_len);
1604 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1605 track_sense_len = sense_len;
1606
1607 if (sense_len > par_sense_len)
5544213b 1608 sense_len = par_sense_len;
4733fcb1
AV
1609
1610 memcpy(cp->sense_buffer, sense_data, sense_len);
1611
9ba56b95
GM
1612 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1613 track_sense_len -= sense_len;
1614 SET_CMD_SENSE_LEN(sp, track_sense_len);
1615
1616 if (track_sense_len != 0) {
2afa19a9 1617 rsp->status_srb = sp;
9ba56b95
GM
1618 cp->result = res;
1619 }
4733fcb1 1620
cfb0919c
CD
1621 if (sense_len) {
1622 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1623 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1624 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1625 cp);
7c3df132
SK
1626 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1627 cp->sense_buffer, sense_len);
cfb0919c 1628 }
4733fcb1
AV
1629}
1630
bad75002
AE
1631struct scsi_dif_tuple {
1632 __be16 guard; /* Checksum */
d6a03581 1633 __be16 app_tag; /* APPL identifier */
bad75002
AE
1634 __be32 ref_tag; /* Target LBA or indirect LBA */
1635};
1636
1637/*
1638 * Checks the guard or meta-data for the type of error
1639 * detected by the HBA. In case of errors, we set the
1640 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1641 * to indicate to the kernel that the HBA detected error.
1642 */
8cb2049c 1643static inline int
bad75002
AE
1644qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1645{
7c3df132 1646 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95 1647 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
8cb2049c
AE
1648 uint8_t *ap = &sts24->data[12];
1649 uint8_t *ep = &sts24->data[20];
bad75002
AE
1650 uint32_t e_ref_tag, a_ref_tag;
1651 uint16_t e_app_tag, a_app_tag;
1652 uint16_t e_guard, a_guard;
1653
8cb2049c
AE
1654 /*
1655 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1656 * would make guard field appear at offset 2
1657 */
1658 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1659 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1660 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1661 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1662 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1663 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
bad75002 1664
7c3df132
SK
1665 ql_dbg(ql_dbg_io, vha, 0x3023,
1666 "iocb(s) %p Returned STATUS.\n", sts24);
bad75002 1667
7c3df132
SK
1668 ql_dbg(ql_dbg_io, vha, 0x3024,
1669 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
bad75002 1670 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
7c3df132 1671 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
bad75002 1672 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
7c3df132 1673 a_app_tag, e_app_tag, a_guard, e_guard);
bad75002 1674
8cb2049c
AE
1675 /*
1676 * Ignore sector if:
1677 * For type 3: ref & app tag is all 'f's
1678 * For type 0,1,2: app tag is all 'f's
1679 */
1680 if ((a_app_tag == 0xffff) &&
1681 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1682 (a_ref_tag == 0xffffffff))) {
1683 uint32_t blocks_done, resid;
1684 sector_t lba_s = scsi_get_lba(cmd);
1685
1686 /* 2TB boundary case covered automatically with this */
1687 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1688
1689 resid = scsi_bufflen(cmd) - (blocks_done *
1690 cmd->device->sector_size);
1691
1692 scsi_set_resid(cmd, resid);
1693 cmd->result = DID_OK << 16;
1694
1695 /* Update protection tag */
1696 if (scsi_prot_sg_count(cmd)) {
1697 uint32_t i, j = 0, k = 0, num_ent;
1698 struct scatterlist *sg;
1699 struct sd_dif_tuple *spt;
1700
1701 /* Patch the corresponding protection tags */
1702 scsi_for_each_prot_sg(cmd, sg,
1703 scsi_prot_sg_count(cmd), i) {
1704 num_ent = sg_dma_len(sg) / 8;
1705 if (k + num_ent < blocks_done) {
1706 k += num_ent;
1707 continue;
1708 }
1709 j = blocks_done - k - 1;
1710 k = blocks_done;
1711 break;
1712 }
1713
1714 if (k != blocks_done) {
cfb0919c 1715 ql_log(ql_log_warn, vha, 0x302f,
8ec9c7fb
RD
1716 "unexpected tag values tag:lba=%x:%llx)\n",
1717 e_ref_tag, (unsigned long long)lba_s);
8cb2049c
AE
1718 return 1;
1719 }
1720
1721 spt = page_address(sg_page(sg)) + sg->offset;
1722 spt += j;
1723
1724 spt->app_tag = 0xffff;
1725 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1726 spt->ref_tag = 0xffffffff;
1727 }
1728
1729 return 0;
1730 }
1731
bad75002
AE
1732 /* check guard */
1733 if (e_guard != a_guard) {
1734 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1735 0x10, 0x1);
1736 set_driver_byte(cmd, DRIVER_SENSE);
1737 set_host_byte(cmd, DID_ABORT);
1738 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1739 return 1;
bad75002
AE
1740 }
1741
e02587d7
AE
1742 /* check ref tag */
1743 if (e_ref_tag != a_ref_tag) {
bad75002 1744 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1745 0x10, 0x3);
bad75002
AE
1746 set_driver_byte(cmd, DRIVER_SENSE);
1747 set_host_byte(cmd, DID_ABORT);
1748 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1749 return 1;
bad75002
AE
1750 }
1751
e02587d7
AE
1752 /* check appl tag */
1753 if (e_app_tag != a_app_tag) {
bad75002 1754 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1755 0x10, 0x2);
bad75002
AE
1756 set_driver_byte(cmd, DRIVER_SENSE);
1757 set_host_byte(cmd, DID_ABORT);
1758 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1759 return 1;
bad75002 1760 }
e02587d7 1761
8cb2049c 1762 return 1;
bad75002
AE
1763}
1764
a9b6f722
SK
1765static void
1766qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1767 struct req_que *req, uint32_t index)
1768{
1769 struct qla_hw_data *ha = vha->hw;
1770 srb_t *sp;
1771 uint16_t comp_status;
1772 uint16_t scsi_status;
1773 uint16_t thread_id;
1774 uint32_t rval = EXT_STATUS_OK;
1775 struct fc_bsg_job *bsg_job = NULL;
1776 sts_entry_t *sts;
1777 struct sts_entry_24xx *sts24;
1778 sts = (sts_entry_t *) pkt;
1779 sts24 = (struct sts_entry_24xx *) pkt;
1780
1781 /* Validate handle. */
8d93f550 1782 if (index >= req->num_outstanding_cmds) {
a9b6f722
SK
1783 ql_log(ql_log_warn, vha, 0x70af,
1784 "Invalid SCSI completion handle 0x%x.\n", index);
1785 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1786 return;
1787 }
1788
1789 sp = req->outstanding_cmds[index];
1790 if (sp) {
1791 /* Free outstanding command slot. */
1792 req->outstanding_cmds[index] = NULL;
1793 bsg_job = sp->u.bsg_job;
1794 } else {
1795 ql_log(ql_log_warn, vha, 0x70b0,
1796 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1797 req->id, index);
1798
1799 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1800 return;
1801 }
1802
1803 if (IS_FWI2_CAPABLE(ha)) {
1804 comp_status = le16_to_cpu(sts24->comp_status);
1805 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1806 } else {
1807 comp_status = le16_to_cpu(sts->comp_status);
1808 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1809 }
1810
1811 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1812 switch (comp_status) {
1813 case CS_COMPLETE:
1814 if (scsi_status == 0) {
1815 bsg_job->reply->reply_payload_rcv_len =
1816 bsg_job->reply_payload.payload_len;
1817 rval = EXT_STATUS_OK;
1818 }
1819 goto done;
1820
1821 case CS_DATA_OVERRUN:
1822 ql_dbg(ql_dbg_user, vha, 0x70b1,
1823 "Command completed with date overrun thread_id=%d\n",
1824 thread_id);
1825 rval = EXT_STATUS_DATA_OVERRUN;
1826 break;
1827
1828 case CS_DATA_UNDERRUN:
1829 ql_dbg(ql_dbg_user, vha, 0x70b2,
1830 "Command completed with date underrun thread_id=%d\n",
1831 thread_id);
1832 rval = EXT_STATUS_DATA_UNDERRUN;
1833 break;
1834 case CS_BIDIR_RD_OVERRUN:
1835 ql_dbg(ql_dbg_user, vha, 0x70b3,
1836 "Command completed with read data overrun thread_id=%d\n",
1837 thread_id);
1838 rval = EXT_STATUS_DATA_OVERRUN;
1839 break;
1840
1841 case CS_BIDIR_RD_WR_OVERRUN:
1842 ql_dbg(ql_dbg_user, vha, 0x70b4,
1843 "Command completed with read and write data overrun "
1844 "thread_id=%d\n", thread_id);
1845 rval = EXT_STATUS_DATA_OVERRUN;
1846 break;
1847
1848 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1849 ql_dbg(ql_dbg_user, vha, 0x70b5,
1850 "Command completed with read data over and write data "
1851 "underrun thread_id=%d\n", thread_id);
1852 rval = EXT_STATUS_DATA_OVERRUN;
1853 break;
1854
1855 case CS_BIDIR_RD_UNDERRUN:
1856 ql_dbg(ql_dbg_user, vha, 0x70b6,
1857 "Command completed with read data data underrun "
1858 "thread_id=%d\n", thread_id);
1859 rval = EXT_STATUS_DATA_UNDERRUN;
1860 break;
1861
1862 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1863 ql_dbg(ql_dbg_user, vha, 0x70b7,
1864 "Command completed with read data under and write data "
1865 "overrun thread_id=%d\n", thread_id);
1866 rval = EXT_STATUS_DATA_UNDERRUN;
1867 break;
1868
1869 case CS_BIDIR_RD_WR_UNDERRUN:
1870 ql_dbg(ql_dbg_user, vha, 0x70b8,
1871 "Command completed with read and write data underrun "
1872 "thread_id=%d\n", thread_id);
1873 rval = EXT_STATUS_DATA_UNDERRUN;
1874 break;
1875
1876 case CS_BIDIR_DMA:
1877 ql_dbg(ql_dbg_user, vha, 0x70b9,
1878 "Command completed with data DMA error thread_id=%d\n",
1879 thread_id);
1880 rval = EXT_STATUS_DMA_ERR;
1881 break;
1882
1883 case CS_TIMEOUT:
1884 ql_dbg(ql_dbg_user, vha, 0x70ba,
1885 "Command completed with timeout thread_id=%d\n",
1886 thread_id);
1887 rval = EXT_STATUS_TIMEOUT;
1888 break;
1889 default:
1890 ql_dbg(ql_dbg_user, vha, 0x70bb,
1891 "Command completed with completion status=0x%x "
1892 "thread_id=%d\n", comp_status, thread_id);
1893 rval = EXT_STATUS_ERR;
1894 break;
1895 }
1896 bsg_job->reply->reply_payload_rcv_len = 0;
1897
1898done:
1899 /* Return the vendor specific reply to API */
1900 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1901 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1902 /* Always return DID_OK, bsg will send the vendor specific response
1903 * in this case only */
1904 sp->done(vha, sp, (DID_OK << 6));
1905
1906}
1907
1da177e4
LT
1908/**
1909 * qla2x00_status_entry() - Process a Status IOCB entry.
1910 * @ha: SCSI driver HA context
1911 * @pkt: Entry pointer
1912 */
1913static void
73208dfd 1914qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1915{
1da177e4 1916 srb_t *sp;
1da177e4
LT
1917 fc_port_t *fcport;
1918 struct scsi_cmnd *cp;
9a853f71
AV
1919 sts_entry_t *sts;
1920 struct sts_entry_24xx *sts24;
1da177e4
LT
1921 uint16_t comp_status;
1922 uint16_t scsi_status;
b7d2280c 1923 uint16_t ox_id;
1da177e4
LT
1924 uint8_t lscsi_status;
1925 int32_t resid;
5544213b
AV
1926 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1927 fw_resid_len;
9a853f71 1928 uint8_t *rsp_info, *sense_data;
e315cd28 1929 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1930 uint32_t handle;
1931 uint16_t que;
1932 struct req_que *req;
b7d2280c 1933 int logit = 1;
9ba56b95 1934 int res = 0;
a9b6f722 1935 uint16_t state_flags = 0;
9a853f71
AV
1936
1937 sts = (sts_entry_t *) pkt;
1938 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1939 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1940 comp_status = le16_to_cpu(sts24->comp_status);
1941 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
a9b6f722 1942 state_flags = le16_to_cpu(sts24->state_flags);
9a853f71
AV
1943 } else {
1944 comp_status = le16_to_cpu(sts->comp_status);
1945 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1946 }
2afa19a9
AC
1947 handle = (uint32_t) LSW(sts->handle);
1948 que = MSW(sts->handle);
1949 req = ha->req_q_map[que];
a9083016 1950
1da177e4 1951 /* Validate handle. */
8d93f550 1952 if (handle < req->num_outstanding_cmds)
2afa19a9 1953 sp = req->outstanding_cmds[handle];
8d93f550 1954 else
1da177e4
LT
1955 sp = NULL;
1956
1957 if (sp == NULL) {
cfb0919c 1958 ql_dbg(ql_dbg_io, vha, 0x3017,
7c3df132 1959 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1960
8f7daead
GM
1961 if (IS_QLA82XX(ha))
1962 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1963 else
1964 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1965 qla2xxx_wake_dpc(vha);
1da177e4
LT
1966 return;
1967 }
a9b6f722
SK
1968
1969 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1970 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1971 return;
1972 }
1973
1974 /* Fast path completion. */
1975 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3c290d0b 1976 qla2x00_do_host_ramp_up(vha);
a9b6f722
SK
1977 qla2x00_process_completed_request(vha, req, handle);
1978
1979 return;
1980 }
1981
1982 req->outstanding_cmds[handle] = NULL;
9ba56b95 1983 cp = GET_CMD_SP(sp);
1da177e4 1984 if (cp == NULL) {
cfb0919c 1985 ql_dbg(ql_dbg_io, vha, 0x3018,
7c3df132
SK
1986 "Command already returned (0x%x/%p).\n",
1987 sts->handle, sp);
1da177e4
LT
1988
1989 return;
1990 }
1991
9a853f71 1992 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1993
bdf79621 1994 fcport = sp->fcport;
1da177e4 1995
b7d2280c 1996 ox_id = 0;
5544213b
AV
1997 sense_len = par_sense_len = rsp_info_len = resid_len =
1998 fw_resid_len = 0;
e428924c 1999 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
2000 if (scsi_status & SS_SENSE_LEN_VALID)
2001 sense_len = le32_to_cpu(sts24->sense_len);
2002 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2003 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2004 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2005 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2006 if (comp_status == CS_DATA_UNDERRUN)
2007 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
2008 rsp_info = sts24->data;
2009 sense_data = sts24->data;
2010 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 2011 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 2012 par_sense_len = sizeof(sts24->data);
9a853f71 2013 } else {
0f00a206
LC
2014 if (scsi_status & SS_SENSE_LEN_VALID)
2015 sense_len = le16_to_cpu(sts->req_sense_length);
2016 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2017 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
2018 resid_len = le32_to_cpu(sts->residual_length);
2019 rsp_info = sts->rsp_info;
2020 sense_data = sts->req_sense_data;
5544213b 2021 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
2022 }
2023
1da177e4
LT
2024 /* Check for any FCP transport errors. */
2025 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 2026 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 2027 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 2028 sense_data += rsp_info_len;
5544213b
AV
2029 par_sense_len -= rsp_info_len;
2030 }
9a853f71 2031 if (rsp_info_len > 3 && rsp_info[3]) {
5e19ed90 2032 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
7c3df132
SK
2033 "FCP I/O protocol failure (0x%x/0x%x).\n",
2034 rsp_info_len, rsp_info[3]);
1da177e4 2035
9ba56b95 2036 res = DID_BUS_BUSY << 16;
b7d2280c 2037 goto out;
1da177e4
LT
2038 }
2039 }
2040
3e8ce320
AV
2041 /* Check for overrun. */
2042 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2043 scsi_status & SS_RESIDUAL_OVER)
2044 comp_status = CS_DATA_OVERRUN;
2045
1da177e4
LT
2046 /*
2047 * Based on Host and scsi status generate status code for Linux
2048 */
2049 switch (comp_status) {
2050 case CS_COMPLETE:
df7baa50 2051 case CS_QUEUE_FULL:
1da177e4 2052 if (scsi_status == 0) {
9ba56b95 2053 res = DID_OK << 16;
1da177e4
LT
2054 break;
2055 }
2056 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 2057 resid = resid_len;
385d70b4 2058 scsi_set_resid(cp, resid);
0da69df1
AV
2059
2060 if (!lscsi_status &&
385d70b4 2061 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 2062 cp->underflow)) {
5e19ed90 2063 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
7c3df132 2064 "Mid-layer underflow "
b7d2280c 2065 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2066 resid, scsi_bufflen(cp));
0da69df1 2067
9ba56b95 2068 res = DID_ERROR << 16;
0da69df1
AV
2069 break;
2070 }
1da177e4 2071 }
9ba56b95 2072 res = DID_OK << 16 | lscsi_status;
1da177e4 2073
df7baa50 2074 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2075 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
7c3df132 2076 "QUEUE FULL detected.\n");
df7baa50
AV
2077 break;
2078 }
b7d2280c 2079 logit = 0;
1da177e4
LT
2080 if (lscsi_status != SS_CHECK_CONDITION)
2081 break;
2082
b80ca4f7 2083 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2084 if (!(scsi_status & SS_SENSE_LEN_VALID))
2085 break;
2086
5544213b 2087 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
9ba56b95 2088 rsp, res);
1da177e4
LT
2089 break;
2090
2091 case CS_DATA_UNDERRUN:
ed17c71b 2092 /* Use F/W calculated residual length. */
0f00a206
LC
2093 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2094 scsi_set_resid(cp, resid);
2095 if (scsi_status & SS_RESIDUAL_UNDER) {
2096 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
5e19ed90 2097 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
7c3df132
SK
2098 "Dropped frame(s) detected "
2099 "(0x%x of 0x%x bytes).\n",
2100 resid, scsi_bufflen(cp));
0f00a206 2101
9ba56b95 2102 res = DID_ERROR << 16 | lscsi_status;
4e85e3d9 2103 goto check_scsi_status;
6acf8190 2104 }
ed17c71b 2105
0f00a206
LC
2106 if (!lscsi_status &&
2107 ((unsigned)(scsi_bufflen(cp) - resid) <
2108 cp->underflow)) {
5e19ed90 2109 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
7c3df132 2110 "Mid-layer underflow "
b7d2280c 2111 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2112 resid, scsi_bufflen(cp));
e038a1be 2113
9ba56b95 2114 res = DID_ERROR << 16;
0f00a206
LC
2115 break;
2116 }
4aee5766
GM
2117 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2118 lscsi_status != SAM_STAT_BUSY) {
2119 /*
2120 * scsi status of task set and busy are considered to be
2121 * task not completed.
2122 */
2123
5e19ed90 2124 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
7c3df132 2125 "Dropped frame(s) detected (0x%x "
4aee5766
GM
2126 "of 0x%x bytes).\n", resid,
2127 scsi_bufflen(cp));
0f00a206 2128
9ba56b95 2129 res = DID_ERROR << 16 | lscsi_status;
0374f55e 2130 goto check_scsi_status;
4aee5766
GM
2131 } else {
2132 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2133 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2134 scsi_status, lscsi_status);
1da177e4
LT
2135 }
2136
9ba56b95 2137 res = DID_OK << 16 | lscsi_status;
b7d2280c 2138 logit = 0;
0f00a206 2139
0374f55e 2140check_scsi_status:
1da177e4 2141 /*
fa2a1ce5 2142 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
2143 * Status.
2144 */
2145 if (lscsi_status != 0) {
ffec28a3 2146 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2147 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
7c3df132 2148 "QUEUE FULL detected.\n");
b7d2280c 2149 logit = 1;
ffec28a3
AV
2150 break;
2151 }
1da177e4
LT
2152 if (lscsi_status != SS_CHECK_CONDITION)
2153 break;
2154
b80ca4f7 2155 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2156 if (!(scsi_status & SS_SENSE_LEN_VALID))
2157 break;
2158
5544213b 2159 qla2x00_handle_sense(sp, sense_data, par_sense_len,
9ba56b95 2160 sense_len, rsp, res);
1da177e4
LT
2161 }
2162 break;
2163
1da177e4
LT
2164 case CS_PORT_LOGGED_OUT:
2165 case CS_PORT_CONFIG_CHG:
2166 case CS_PORT_BUSY:
2167 case CS_INCOMPLETE:
2168 case CS_PORT_UNAVAILABLE:
b7d2280c 2169 case CS_TIMEOUT:
ff454b01
CD
2170 case CS_RESET:
2171
056a4483
MC
2172 /*
2173 * We are going to have the fc class block the rport
2174 * while we try to recover so instruct the mid layer
2175 * to requeue until the class decides how to handle this.
2176 */
9ba56b95 2177 res = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
2178
2179 if (comp_status == CS_TIMEOUT) {
2180 if (IS_FWI2_CAPABLE(ha))
2181 break;
2182 else if ((le16_to_cpu(sts->status_flags) &
2183 SF_LOGOUT_SENT) == 0)
2184 break;
2185 }
2186
5e19ed90 2187 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
7c3df132
SK
2188 "Port down status: port-state=0x%x.\n",
2189 atomic_read(&fcport->state));
b7d2280c 2190
a7a28504 2191 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 2192 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
2193 break;
2194
1da177e4 2195 case CS_ABORTED:
9ba56b95 2196 res = DID_RESET << 16;
1da177e4 2197 break;
bad75002
AE
2198
2199 case CS_DIF_ERROR:
8cb2049c 2200 logit = qla2x00_handle_dif_error(sp, sts24);
fb6e4668 2201 res = cp->result;
bad75002 2202 break;
9e522cd8
AE
2203
2204 case CS_TRANSPORT:
2205 res = DID_ERROR << 16;
2206
2207 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2208 break;
2209
2210 if (state_flags & BIT_4)
2211 scmd_printk(KERN_WARNING, cp,
2212 "Unsupported device '%s' found.\n",
2213 cp->device->vendor);
2214 break;
2215
1da177e4 2216 default:
9ba56b95 2217 res = DID_ERROR << 16;
1da177e4
LT
2218 break;
2219 }
2220
b7d2280c
AV
2221out:
2222 if (logit)
5e19ed90 2223 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
7c3df132 2224 "FCP command status: 0x%x-0x%x (0x%x) "
cfb0919c
CD
2225 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2226 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
7c3df132 2227 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
9ba56b95 2228 comp_status, scsi_status, res, vha->host_no,
cfb0919c
CD
2229 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2230 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2231 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2232 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2233 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
7c3df132 2234 resid_len, fw_resid_len);
b7d2280c 2235
3c290d0b
CD
2236 if (!res)
2237 qla2x00_do_host_ramp_up(vha);
2238
2afa19a9 2239 if (rsp->status_srb == NULL)
9ba56b95 2240 sp->done(ha, sp, res);
1da177e4
LT
2241}
2242
2243/**
2244 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2245 * @ha: SCSI driver HA context
2246 * @pkt: Entry pointer
2247 *
2248 * Extended sense data.
2249 */
2250static void
2afa19a9 2251qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4 2252{
9ba56b95 2253 uint8_t sense_sz = 0;
2afa19a9 2254 struct qla_hw_data *ha = rsp->hw;
7c3df132 2255 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
9ba56b95 2256 srb_t *sp = rsp->status_srb;
1da177e4 2257 struct scsi_cmnd *cp;
9ba56b95
GM
2258 uint32_t sense_len;
2259 uint8_t *sense_ptr;
1da177e4 2260
9ba56b95
GM
2261 if (!sp || !GET_CMD_SENSE_LEN(sp))
2262 return;
1da177e4 2263
9ba56b95
GM
2264 sense_len = GET_CMD_SENSE_LEN(sp);
2265 sense_ptr = GET_CMD_SENSE_PTR(sp);
1da177e4 2266
9ba56b95
GM
2267 cp = GET_CMD_SP(sp);
2268 if (cp == NULL) {
2269 ql_log(ql_log_warn, vha, 0x3025,
2270 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1da177e4 2271
9ba56b95
GM
2272 rsp->status_srb = NULL;
2273 return;
1da177e4 2274 }
1da177e4 2275
9ba56b95
GM
2276 if (sense_len > sizeof(pkt->data))
2277 sense_sz = sizeof(pkt->data);
2278 else
2279 sense_sz = sense_len;
c4631191 2280
9ba56b95
GM
2281 /* Move sense data. */
2282 if (IS_FWI2_CAPABLE(ha))
2283 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2284 memcpy(sense_ptr, pkt->data, sense_sz);
2285 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2286 sense_ptr, sense_sz);
c4631191 2287
9ba56b95
GM
2288 sense_len -= sense_sz;
2289 sense_ptr += sense_sz;
c4631191 2290
9ba56b95
GM
2291 SET_CMD_SENSE_PTR(sp, sense_ptr);
2292 SET_CMD_SENSE_LEN(sp, sense_len);
2293
2294 /* Place command on done queue. */
2295 if (sense_len == 0) {
2296 rsp->status_srb = NULL;
2297 sp->done(ha, sp, cp->result);
c4631191 2298 }
c4631191
GM
2299}
2300
1da177e4
LT
2301/**
2302 * qla2x00_error_entry() - Process an error entry.
2303 * @ha: SCSI driver HA context
2304 * @pkt: Entry pointer
2305 */
2306static void
73208dfd 2307qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
2308{
2309 srb_t *sp;
e315cd28 2310 struct qla_hw_data *ha = vha->hw;
c4631191 2311 const char func[] = "ERROR-IOCB";
2afa19a9 2312 uint16_t que = MSW(pkt->handle);
a6fe35c0 2313 struct req_que *req = NULL;
9ba56b95 2314 int res = DID_ERROR << 16;
7c3df132 2315
9ba56b95
GM
2316 ql_dbg(ql_dbg_async, vha, 0x502a,
2317 "type of error status in response: 0x%x\n", pkt->entry_status);
2318
a6fe35c0
AE
2319 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2320 goto fatal;
2321
2322 req = ha->req_q_map[que];
2323
9ba56b95
GM
2324 if (pkt->entry_status & RF_BUSY)
2325 res = DID_BUS_BUSY << 16;
1da177e4 2326
c4631191 2327 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
a6fe35c0 2328 if (sp) {
9ba56b95 2329 sp->done(ha, sp, res);
a6fe35c0 2330 return;
1da177e4 2331 }
a6fe35c0
AE
2332fatal:
2333 ql_log(ql_log_warn, vha, 0x5030,
2334 "Error entry - invalid handle/queue.\n");
2335
2336 if (IS_QLA82XX(ha))
2337 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2338 else
2339 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2340 qla2xxx_wake_dpc(vha);
1da177e4
LT
2341}
2342
9a853f71
AV
2343/**
2344 * qla24xx_mbx_completion() - Process mailbox command completions.
2345 * @ha: SCSI driver HA context
2346 * @mb0: Mailbox0 register
2347 */
2348static void
e315cd28 2349qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
2350{
2351 uint16_t cnt;
4fa94f83 2352 uint32_t mboxes;
9a853f71 2353 uint16_t __iomem *wptr;
e315cd28 2354 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
2355 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2356
4fa94f83
AV
2357 /* Read all mbox registers? */
2358 mboxes = (1 << ha->mbx_count) - 1;
2359 if (!ha->mcp)
a720101d 2360 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
4fa94f83
AV
2361 else
2362 mboxes = ha->mcp->in_mb;
2363
9a853f71
AV
2364 /* Load return mailbox registers. */
2365 ha->flags.mbox_int = 1;
2366 ha->mailbox_out[0] = mb0;
4fa94f83 2367 mboxes >>= 1;
9a853f71
AV
2368 wptr = (uint16_t __iomem *)&reg->mailbox1;
2369
2370 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
4fa94f83
AV
2371 if (mboxes & BIT_0)
2372 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2373
2374 mboxes >>= 1;
9a853f71
AV
2375 wptr++;
2376 }
9a853f71
AV
2377}
2378
2379/**
2380 * qla24xx_process_response_queue() - Process response queue entries.
2381 * @ha: SCSI driver HA context
2382 */
2afa19a9
AC
2383void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2384 struct rsp_que *rsp)
9a853f71 2385{
9a853f71 2386 struct sts_entry_24xx *pkt;
a9083016 2387 struct qla_hw_data *ha = vha->hw;
9a853f71 2388
e315cd28 2389 if (!vha->flags.online)
9a853f71
AV
2390 return;
2391
e315cd28
AC
2392 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2393 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2394
e315cd28
AC
2395 rsp->ring_index++;
2396 if (rsp->ring_index == rsp->length) {
2397 rsp->ring_index = 0;
2398 rsp->ring_ptr = rsp->ring;
9a853f71 2399 } else {
e315cd28 2400 rsp->ring_ptr++;
9a853f71
AV
2401 }
2402
2403 if (pkt->entry_status != 0) {
73208dfd 2404 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2d70c103
NB
2405
2406 (void)qlt_24xx_process_response_error(vha, pkt);
2407
9a853f71
AV
2408 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2409 wmb();
2410 continue;
2411 }
2412
2413 switch (pkt->entry_type) {
2414 case STATUS_TYPE:
73208dfd 2415 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2416 break;
2417 case STATUS_CONT_TYPE:
2afa19a9 2418 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2419 break;
2c3dfe3f 2420 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2421 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2422 (struct vp_rpt_id_entry_24xx *)pkt);
2423 break;
ac280b67
AV
2424 case LOGINOUT_PORT_IOCB_TYPE:
2425 qla24xx_logio_entry(vha, rsp->req,
2426 (struct logio_entry_24xx *)pkt);
2427 break;
3822263e
MI
2428 case TSK_MGMT_IOCB_TYPE:
2429 qla24xx_tm_iocb_entry(vha, rsp->req,
2430 (struct tsk_mgmt_entry *)pkt);
2431 break;
9a069e19
GM
2432 case CT_IOCB_TYPE:
2433 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
9a069e19
GM
2434 break;
2435 case ELS_IOCB_TYPE:
2436 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2437 break;
2d70c103
NB
2438 case ABTS_RECV_24XX:
2439 /* ensure that the ATIO queue is empty */
2440 qlt_24xx_process_atio_queue(vha);
2441 case ABTS_RESP_24XX:
2442 case CTIO_TYPE7:
2443 case NOTIFY_ACK_TYPE:
2444 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2445 break;
54883291
SK
2446 case MARKER_TYPE:
2447 /* Do nothing in this case, this check is to prevent it
2448 * from falling into default case
2449 */
2450 break;
9a853f71
AV
2451 default:
2452 /* Type Not Supported. */
7c3df132
SK
2453 ql_dbg(ql_dbg_async, vha, 0x5042,
2454 "Received unknown response pkt type %x "
9a853f71 2455 "entry status=%x.\n",
7c3df132 2456 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2457 break;
2458 }
2459 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2460 wmb();
2461 }
2462
2463 /* Adjust ring index */
a9083016
GM
2464 if (IS_QLA82XX(ha)) {
2465 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2466 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2467 } else
2468 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2469}
2470
05236a05 2471static void
e315cd28 2472qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2473{
2474 int rval;
2475 uint32_t cnt;
e315cd28 2476 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2477 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2478
6246b8a1 2479 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
05236a05
AV
2480 return;
2481
2482 rval = QLA_SUCCESS;
2483 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2484 RD_REG_DWORD(&reg->iobase_addr);
2485 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2486 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2487 rval == QLA_SUCCESS; cnt--) {
2488 if (cnt) {
2489 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2490 udelay(10);
2491 } else
2492 rval = QLA_FUNCTION_TIMEOUT;
2493 }
2494 if (rval == QLA_SUCCESS)
2495 goto next_test;
2496
2497 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2498 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2499 rval == QLA_SUCCESS; cnt--) {
2500 if (cnt) {
2501 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2502 udelay(10);
2503 } else
2504 rval = QLA_FUNCTION_TIMEOUT;
2505 }
2506 if (rval != QLA_SUCCESS)
2507 goto done;
2508
2509next_test:
2510 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2511 ql_log(ql_log_info, vha, 0x504c,
2512 "Additional code -- 0x55AA.\n");
05236a05
AV
2513
2514done:
2515 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2516 RD_REG_DWORD(&reg->iobase_window);
2517}
2518
9a853f71 2519/**
6246b8a1 2520 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
9a853f71
AV
2521 * @irq:
2522 * @dev_id: SCSI driver HA context
9a853f71
AV
2523 *
2524 * Called by system whenever the host adapter generates an interrupt.
2525 *
2526 * Returns handled flag.
2527 */
2528irqreturn_t
7d12e780 2529qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2530{
e315cd28
AC
2531 scsi_qla_host_t *vha;
2532 struct qla_hw_data *ha;
9a853f71
AV
2533 struct device_reg_24xx __iomem *reg;
2534 int status;
9a853f71
AV
2535 unsigned long iter;
2536 uint32_t stat;
2537 uint32_t hccr;
7d613ac6 2538 uint16_t mb[8];
e315cd28 2539 struct rsp_que *rsp;
43fac4d9 2540 unsigned long flags;
9a853f71 2541
e315cd28
AC
2542 rsp = (struct rsp_que *) dev_id;
2543 if (!rsp) {
3256b435
CD
2544 ql_log(ql_log_info, NULL, 0x5059,
2545 "%s: NULL response queue pointer.\n", __func__);
9a853f71
AV
2546 return IRQ_NONE;
2547 }
2548
e315cd28 2549 ha = rsp->hw;
9a853f71
AV
2550 reg = &ha->iobase->isp24;
2551 status = 0;
2552
85880801
AV
2553 if (unlikely(pci_channel_offline(ha->pdev)))
2554 return IRQ_HANDLED;
2555
43fac4d9 2556 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2557 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2558 for (iter = 50; iter--; ) {
2559 stat = RD_REG_DWORD(&reg->host_status);
2560 if (stat & HSRX_RISC_PAUSED) {
85880801 2561 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2562 break;
2563
9a853f71
AV
2564 hccr = RD_REG_DWORD(&reg->hccr);
2565
7c3df132
SK
2566 ql_log(ql_log_warn, vha, 0x504b,
2567 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2568 hccr);
05236a05 2569
e315cd28 2570 qla2xxx_check_risc_status(vha);
05236a05 2571
e315cd28
AC
2572 ha->isp_ops->fw_dump(vha, 1);
2573 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2574 break;
2575 } else if ((stat & HSRX_RISC_INT) == 0)
2576 break;
2577
2578 switch (stat & 0xff) {
fafbda9f
AE
2579 case INTR_ROM_MB_SUCCESS:
2580 case INTR_ROM_MB_FAILED:
2581 case INTR_MB_SUCCESS:
2582 case INTR_MB_FAILED:
e315cd28 2583 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2584 status |= MBX_INTERRUPT;
2585
2586 break;
fafbda9f 2587 case INTR_ASYNC_EVENT:
9a853f71
AV
2588 mb[0] = MSW(stat);
2589 mb[1] = RD_REG_WORD(&reg->mailbox1);
2590 mb[2] = RD_REG_WORD(&reg->mailbox2);
2591 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2592 qla2x00_async_event(vha, rsp, mb);
9a853f71 2593 break;
fafbda9f
AE
2594 case INTR_RSP_QUE_UPDATE:
2595 case INTR_RSP_QUE_UPDATE_83XX:
2afa19a9 2596 qla24xx_process_response_queue(vha, rsp);
9a853f71 2597 break;
fafbda9f 2598 case INTR_ATIO_QUE_UPDATE:
2d70c103
NB
2599 qlt_24xx_process_atio_queue(vha);
2600 break;
fafbda9f 2601 case INTR_ATIO_RSP_QUE_UPDATE:
2d70c103
NB
2602 qlt_24xx_process_atio_queue(vha);
2603 qla24xx_process_response_queue(vha, rsp);
2604 break;
9a853f71 2605 default:
7c3df132
SK
2606 ql_dbg(ql_dbg_async, vha, 0x504f,
2607 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2608 break;
2609 }
2610 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2611 RD_REG_DWORD_RELAXED(&reg->hccr);
cb860bbd
GM
2612 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2613 ndelay(3500);
9a853f71 2614 }
43fac4d9 2615 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71
AV
2616
2617 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2618 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 2619 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2620 complete(&ha->mbx_intr_comp);
9a853f71
AV
2621 }
2622
2623 return IRQ_HANDLED;
2624}
2625
a8488abe
AV
2626static irqreturn_t
2627qla24xx_msix_rsp_q(int irq, void *dev_id)
2628{
e315cd28
AC
2629 struct qla_hw_data *ha;
2630 struct rsp_que *rsp;
a8488abe 2631 struct device_reg_24xx __iomem *reg;
2afa19a9 2632 struct scsi_qla_host *vha;
0f19bc68 2633 unsigned long flags;
a8488abe 2634
e315cd28
AC
2635 rsp = (struct rsp_que *) dev_id;
2636 if (!rsp) {
3256b435
CD
2637 ql_log(ql_log_info, NULL, 0x505a,
2638 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2639 return IRQ_NONE;
2640 }
2641 ha = rsp->hw;
a8488abe
AV
2642 reg = &ha->iobase->isp24;
2643
0f19bc68 2644 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2645
a67093d4 2646 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2647 qla24xx_process_response_queue(vha, rsp);
3155754a 2648 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2649 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2650 RD_REG_DWORD_RELAXED(&reg->hccr);
2651 }
0f19bc68 2652 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2653
2654 return IRQ_HANDLED;
2655}
2656
68ca949c
AC
2657static irqreturn_t
2658qla25xx_msix_rsp_q(int irq, void *dev_id)
2659{
2660 struct qla_hw_data *ha;
2661 struct rsp_que *rsp;
3155754a 2662 struct device_reg_24xx __iomem *reg;
0f19bc68 2663 unsigned long flags;
68ca949c
AC
2664
2665 rsp = (struct rsp_que *) dev_id;
2666 if (!rsp) {
3256b435
CD
2667 ql_log(ql_log_info, NULL, 0x505b,
2668 "%s: NULL response queue pointer.\n", __func__);
68ca949c
AC
2669 return IRQ_NONE;
2670 }
2671 ha = rsp->hw;
2672
3155754a 2673 /* Clear the interrupt, if enabled, for this response queue */
d424754c 2674 if (!ha->flags.disable_msix_handshake) {
3155754a 2675 reg = &ha->iobase->isp24;
0f19bc68 2676 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2677 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2678 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2679 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2680 }
68ca949c
AC
2681 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2682
2683 return IRQ_HANDLED;
2684}
2685
a8488abe
AV
2686static irqreturn_t
2687qla24xx_msix_default(int irq, void *dev_id)
2688{
e315cd28
AC
2689 scsi_qla_host_t *vha;
2690 struct qla_hw_data *ha;
2691 struct rsp_que *rsp;
a8488abe
AV
2692 struct device_reg_24xx __iomem *reg;
2693 int status;
a8488abe
AV
2694 uint32_t stat;
2695 uint32_t hccr;
7d613ac6 2696 uint16_t mb[8];
0f19bc68 2697 unsigned long flags;
a8488abe 2698
e315cd28
AC
2699 rsp = (struct rsp_que *) dev_id;
2700 if (!rsp) {
3256b435
CD
2701 ql_log(ql_log_info, NULL, 0x505c,
2702 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2703 return IRQ_NONE;
2704 }
2705 ha = rsp->hw;
a8488abe
AV
2706 reg = &ha->iobase->isp24;
2707 status = 0;
2708
0f19bc68 2709 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2710 vha = pci_get_drvdata(ha->pdev);
87f27015 2711 do {
a8488abe
AV
2712 stat = RD_REG_DWORD(&reg->host_status);
2713 if (stat & HSRX_RISC_PAUSED) {
85880801 2714 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2715 break;
2716
a8488abe
AV
2717 hccr = RD_REG_DWORD(&reg->hccr);
2718
7c3df132
SK
2719 ql_log(ql_log_info, vha, 0x5050,
2720 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2721 hccr);
05236a05 2722
e315cd28 2723 qla2xxx_check_risc_status(vha);
05236a05 2724
e315cd28
AC
2725 ha->isp_ops->fw_dump(vha, 1);
2726 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2727 break;
2728 } else if ((stat & HSRX_RISC_INT) == 0)
2729 break;
2730
2731 switch (stat & 0xff) {
fafbda9f
AE
2732 case INTR_ROM_MB_SUCCESS:
2733 case INTR_ROM_MB_FAILED:
2734 case INTR_MB_SUCCESS:
2735 case INTR_MB_FAILED:
e315cd28 2736 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2737 status |= MBX_INTERRUPT;
2738
2739 break;
fafbda9f 2740 case INTR_ASYNC_EVENT:
a8488abe
AV
2741 mb[0] = MSW(stat);
2742 mb[1] = RD_REG_WORD(&reg->mailbox1);
2743 mb[2] = RD_REG_WORD(&reg->mailbox2);
2744 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2745 qla2x00_async_event(vha, rsp, mb);
a8488abe 2746 break;
fafbda9f
AE
2747 case INTR_RSP_QUE_UPDATE:
2748 case INTR_RSP_QUE_UPDATE_83XX:
2afa19a9 2749 qla24xx_process_response_queue(vha, rsp);
a8488abe 2750 break;
fafbda9f 2751 case INTR_ATIO_QUE_UPDATE:
2d70c103
NB
2752 qlt_24xx_process_atio_queue(vha);
2753 break;
fafbda9f 2754 case INTR_ATIO_RSP_QUE_UPDATE:
2d70c103
NB
2755 qlt_24xx_process_atio_queue(vha);
2756 qla24xx_process_response_queue(vha, rsp);
2757 break;
a8488abe 2758 default:
7c3df132
SK
2759 ql_dbg(ql_dbg_async, vha, 0x5051,
2760 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2761 break;
2762 }
2763 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2764 } while (0);
0f19bc68 2765 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2766
2767 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2768 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 2769 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2770 complete(&ha->mbx_intr_comp);
a8488abe 2771 }
a8488abe
AV
2772 return IRQ_HANDLED;
2773}
2774
2775/* Interrupt handling helpers. */
2776
2777struct qla_init_msix_entry {
a8488abe 2778 const char *name;
476834c2 2779 irq_handler_t handler;
a8488abe
AV
2780};
2781
68ca949c 2782static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2783 { "qla2xxx (default)", qla24xx_msix_default },
2784 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2785 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2786};
2787
a9083016
GM
2788static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2789 { "qla2xxx (default)", qla82xx_msix_default },
2790 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2791};
2792
aa230bc5
AE
2793static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
2794 { "qla2xxx (default)", qla24xx_msix_default },
2795 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2796 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
2797};
2798
a8488abe 2799static void
e315cd28 2800qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2801{
2802 int i;
2803 struct qla_msix_entry *qentry;
7c3df132 2804 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2805
73208dfd
AC
2806 for (i = 0; i < ha->msix_count; i++) {
2807 qentry = &ha->msix_entries[i];
a8488abe 2808 if (qentry->have_irq)
73208dfd 2809 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2810 }
2811 pci_disable_msix(ha->pdev);
73208dfd
AC
2812 kfree(ha->msix_entries);
2813 ha->msix_entries = NULL;
2814 ha->flags.msix_enabled = 0;
7c3df132
SK
2815 ql_dbg(ql_dbg_init, vha, 0x0042,
2816 "Disabled the MSI.\n");
a8488abe
AV
2817}
2818
2819static int
73208dfd 2820qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2821{
ad038fa8 2822#define MIN_MSIX_COUNT 2
a8488abe 2823 int i, ret;
73208dfd 2824 struct msix_entry *entries;
a8488abe 2825 struct qla_msix_entry *qentry;
7c3df132 2826 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2827
2828 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2829 GFP_KERNEL);
7c3df132
SK
2830 if (!entries) {
2831 ql_log(ql_log_warn, vha, 0x00bc,
2832 "Failed to allocate memory for msix_entry.\n");
73208dfd 2833 return -ENOMEM;
7c3df132 2834 }
a8488abe 2835
73208dfd
AC
2836 for (i = 0; i < ha->msix_count; i++)
2837 entries[i].entry = i;
a8488abe 2838
73208dfd 2839 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2840 if (ret) {
ad038fa8
LC
2841 if (ret < MIN_MSIX_COUNT)
2842 goto msix_failed;
2843
7c3df132
SK
2844 ql_log(ql_log_warn, vha, 0x00c6,
2845 "MSI-X: Failed to enable support "
2846 "-- %d/%d\n Retry with %d vectors.\n",
2847 ha->msix_count, ret, ret);
73208dfd
AC
2848 ha->msix_count = ret;
2849 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2850 if (ret) {
ad038fa8 2851msix_failed:
7c3df132
SK
2852 ql_log(ql_log_fatal, vha, 0x00c7,
2853 "MSI-X: Failed to enable support, "
2854 "giving up -- %d/%d.\n",
2855 ha->msix_count, ret);
73208dfd
AC
2856 goto msix_out;
2857 }
2afa19a9 2858 ha->max_rsp_queues = ha->msix_count - 1;
73208dfd
AC
2859 }
2860 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2861 ha->msix_count, GFP_KERNEL);
2862 if (!ha->msix_entries) {
7c3df132
SK
2863 ql_log(ql_log_fatal, vha, 0x00c8,
2864 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2865 ret = -ENOMEM;
a8488abe
AV
2866 goto msix_out;
2867 }
2868 ha->flags.msix_enabled = 1;
2869
73208dfd
AC
2870 for (i = 0; i < ha->msix_count; i++) {
2871 qentry = &ha->msix_entries[i];
2872 qentry->vector = entries[i].vector;
2873 qentry->entry = entries[i].entry;
a8488abe 2874 qentry->have_irq = 0;
73208dfd 2875 qentry->rsp = NULL;
a8488abe
AV
2876 }
2877
2afa19a9 2878 /* Enable MSI-X vectors for the base queue */
aa230bc5 2879 for (i = 0; i < ha->msix_count; i++) {
2afa19a9 2880 qentry = &ha->msix_entries[i];
aa230bc5
AE
2881 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2882 ret = request_irq(qentry->vector,
2883 qla83xx_msix_entries[i].handler,
2884 0, qla83xx_msix_entries[i].name, rsp);
2885 } else if (IS_QLA82XX(ha)) {
a9083016
GM
2886 ret = request_irq(qentry->vector,
2887 qla82xx_msix_entries[i].handler,
2888 0, qla82xx_msix_entries[i].name, rsp);
2889 } else {
2890 ret = request_irq(qentry->vector,
2891 msix_entries[i].handler,
2892 0, msix_entries[i].name, rsp);
2893 }
2afa19a9 2894 if (ret) {
7c3df132
SK
2895 ql_log(ql_log_fatal, vha, 0x00cb,
2896 "MSI-X: unable to register handler -- %x/%d.\n",
2897 qentry->vector, ret);
2afa19a9
AC
2898 qla24xx_disable_msix(ha);
2899 ha->mqenable = 0;
2900 goto msix_out;
2901 }
2902 qentry->have_irq = 1;
2903 qentry->rsp = rsp;
2904 rsp->msix = qentry;
73208dfd 2905 }
73208dfd
AC
2906
2907 /* Enable MSI-X vector for response queue update for queue 0 */
6246b8a1
GM
2908 if (IS_QLA83XX(ha)) {
2909 if (ha->msixbase && ha->mqiobase &&
2910 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2911 ha->mqenable = 1;
2912 } else
2913 if (ha->mqiobase
2914 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2915 ha->mqenable = 1;
7c3df132
SK
2916 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2917 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2918 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2919 ql_dbg(ql_dbg_init, vha, 0x0055,
2920 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2921 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2922
a8488abe 2923msix_out:
73208dfd 2924 kfree(entries);
a8488abe
AV
2925 return ret;
2926}
2927
2928int
73208dfd 2929qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2930{
2931 int ret;
963b0fdd 2932 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2933 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2934
2935 /* If possible, enable MSI-X. */
6246b8a1
GM
2936 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2937 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
6377a7ae
BH
2938 goto skip_msi;
2939
2940 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2941 (ha->pdev->subsystem_device == 0x7040 ||
2942 ha->pdev->subsystem_device == 0x7041 ||
2943 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2944 ql_log(ql_log_warn, vha, 0x0034,
2945 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2946 ha->pdev->subsystem_vendor,
7c3df132 2947 ha->pdev->subsystem_device);
6377a7ae
BH
2948 goto skip_msi;
2949 }
a8488abe 2950
42cd4f5d 2951 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
7c3df132
SK
2952 ql_log(ql_log_warn, vha, 0x0035,
2953 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
42cd4f5d 2954 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
a8488abe
AV
2955 goto skip_msix;
2956 }
2957
73208dfd 2958 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2959 if (!ret) {
7c3df132
SK
2960 ql_dbg(ql_dbg_init, vha, 0x0036,
2961 "MSI-X: Enabled (0x%X, 0x%X).\n",
2962 ha->chip_revision, ha->fw_attributes);
963b0fdd 2963 goto clear_risc_ints;
a8488abe 2964 }
7c3df132
SK
2965 ql_log(ql_log_info, vha, 0x0037,
2966 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2967skip_msix:
cbedb601 2968
3a03eb79 2969 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
487370af 2970 !IS_QLA8001(ha) && !IS_QLA82XX(ha))
cbedb601
AV
2971 goto skip_msi;
2972
2973 ret = pci_enable_msi(ha->pdev);
2974 if (!ret) {
7c3df132
SK
2975 ql_dbg(ql_dbg_init, vha, 0x0038,
2976 "MSI: Enabled.\n");
cbedb601 2977 ha->flags.msi_enabled = 1;
a9083016 2978 } else
7c3df132
SK
2979 ql_log(ql_log_warn, vha, 0x0039,
2980 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
a033b655
GM
2981
2982 /* Skip INTx on ISP82xx. */
2983 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2984 return QLA_FUNCTION_FAILED;
2985
cbedb601
AV
2986skip_msi:
2987
fd34f556 2988 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2989 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2990 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2991 if (ret) {
7c3df132 2992 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2993 "Failed to reserve interrupt %d already in use.\n",
2994 ha->pdev->irq);
963b0fdd 2995 goto fail;
68d91cbd
SK
2996 } else if (!ha->flags.msi_enabled)
2997 ql_dbg(ql_dbg_init, vha, 0x0125,
2998 "INTa mode: Enabled.\n");
7992abfc 2999
963b0fdd
AV
3000clear_risc_ints:
3001
c6952483 3002 spin_lock_irq(&ha->hardware_lock);
c1114953 3003 if (!IS_FWI2_CAPABLE(ha))
963b0fdd 3004 WRT_REG_WORD(&reg->isp.semaphore, 0);
c6952483 3005 spin_unlock_irq(&ha->hardware_lock);
a8488abe 3006
963b0fdd 3007fail:
a8488abe
AV
3008 return ret;
3009}
3010
3011void
e315cd28 3012qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 3013{
e315cd28 3014 struct qla_hw_data *ha = vha->hw;
9a347ff4
CD
3015 struct rsp_que *rsp;
3016
3017 /*
3018 * We need to check that ha->rsp_q_map is valid in case we are called
3019 * from a probe failure context.
3020 */
3021 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3022 return;
3023 rsp = ha->rsp_q_map[0];
a8488abe
AV
3024
3025 if (ha->flags.msix_enabled)
3026 qla24xx_disable_msix(ha);
90a86fc0 3027 else if (ha->flags.msi_enabled) {
e315cd28 3028 free_irq(ha->pdev->irq, rsp);
cbedb601 3029 pci_disable_msi(ha->pdev);
90a86fc0
JC
3030 } else
3031 free_irq(ha->pdev->irq, rsp);
a8488abe 3032}
e315cd28 3033
73208dfd
AC
3034
3035int qla25xx_request_irq(struct rsp_que *rsp)
3036{
3037 struct qla_hw_data *ha = rsp->hw;
2afa19a9 3038 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 3039 struct qla_msix_entry *msix = rsp->msix;
7c3df132 3040 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
3041 int ret;
3042
3043 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3044 if (ret) {
7c3df132
SK
3045 ql_log(ql_log_fatal, vha, 0x00e6,
3046 "MSI-X: Unable to register handler -- %x/%d.\n",
3047 msix->vector, ret);
73208dfd
AC
3048 return ret;
3049 }
3050 msix->have_irq = 1;
3051 msix->rsp = rsp;
3052 return ret;
3053}