[SCSI] qla2xxx: Help Coverity with analyzing ct_sns_pkt initialization.
[linux-2.6-block.git] drivers/scsi/qla2xxx/qla_isr.c
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
1e63395c 3 * Copyright (c) 2003-2013 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
2d70c103 8#include "qla_target.h"
1da177e4 9
05236a05 10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
df7baa50 12#include <scsi/scsi_tcq.h>
9a069e19 13#include <scsi/scsi_bsg_fc.h>
bad75002 14#include <scsi/scsi_eh.h>
df7baa50 15
1da177e4 16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
73208dfd 17static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
2afa19a9 18static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
73208dfd
AC
19static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
20 sts_entry_t *);
9a853f71 21
1da177e4
LT
22/**
23 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 24 * @irq: interrupt number
25 * @dev_id: SCSI driver HA context
1da177e4
LT
26 *
27 * Called by system whenever the host adapter generates an interrupt.
28 *
29 * Returns handled flag.
30 */
31irqreturn_t
7d12e780 32qla2100_intr_handler(int irq, void *dev_id)
1da177e4 33{
e315cd28
AC
34 scsi_qla_host_t *vha;
35 struct qla_hw_data *ha;
3d71644c 36 struct device_reg_2xxx __iomem *reg;
1da177e4 37 int status;
1da177e4 38 unsigned long iter;
14e660e6 39 uint16_t hccr;
9a853f71 40 uint16_t mb[4];
e315cd28 41 struct rsp_que *rsp;
43fac4d9 42 unsigned long flags;
1da177e4 43
e315cd28
AC
44 rsp = (struct rsp_que *) dev_id;
45 if (!rsp) {
3256b435
CD
46 ql_log(ql_log_info, NULL, 0x505d,
47 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
48 return (IRQ_NONE);
49 }
50
e315cd28 51 ha = rsp->hw;
3d71644c 52 reg = &ha->iobase->isp;
1da177e4
LT
53 status = 0;
54
43fac4d9 55 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 56 vha = pci_get_drvdata(ha->pdev);
1da177e4 57 for (iter = 50; iter--; ) {
14e660e6
SJ
58 hccr = RD_REG_WORD(&reg->hccr);
59 if (hccr & HCCR_RISC_PAUSE) {
60 if (pci_channel_offline(ha->pdev))
61 break;
62
63 /*
64 * Issue a "HARD" reset in order for the RISC interrupt
a06a0f8e 65 * bit to be cleared. Schedule a big hammer to get
14e660e6
SJ
66 * out of the RISC PAUSED state.
67 */
68 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
69 RD_REG_WORD(&reg->hccr);
70
e315cd28
AC
71 ha->isp_ops->fw_dump(vha, 1);
72 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
73 break;
74 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
75 break;
76
77 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
78 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
79 RD_REG_WORD(&reg->hccr);
80
81 /* Get mailbox data. */
9a853f71
AV
82 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
83 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 84 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 85 status |= MBX_INTERRUPT;
9a853f71
AV
86 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
87 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
88 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
89 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 90 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
91 } else {
92 /*EMPTY*/
7c3df132
SK
93 ql_dbg(ql_dbg_async, vha, 0x5025,
94 "Unrecognized interrupt type (%d).\n",
95 mb[0]);
1da177e4
LT
96 }
97 /* Release mailbox registers. */
98 WRT_REG_WORD(&reg->semaphore, 0);
99 RD_REG_WORD(&reg->semaphore);
100 } else {
73208dfd 101 qla2x00_process_response_queue(rsp);
1da177e4
LT
102
103 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
104 RD_REG_WORD(&reg->hccr);
105 }
106 }
36439832 107 qla2x00_handle_mbx_completion(ha, status);
43fac4d9 108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 109
1da177e4
LT
110 return (IRQ_HANDLED);
111}
112
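/*
 * Illustrative sketch only (not part of the driver): how a legacy INTx
 * handler such as qla2100_intr_handler() is typically hooked up.  The key
 * point, visible in the handler above, is that dev_id must be the
 * struct rsp_que pointer, because the ISR immediately casts it back.
 * The IRQF_SHARED flag, the device name string and the 0x0000 message id
 * are placeholder assumptions for this example; <linux/interrupt.h> is
 * assumed to be reachable through qla_def.h.
 */
static int example_register_intx(struct qla_hw_data *ha, struct rsp_que *rsp)
{
        int ret;

        ret = request_irq(ha->pdev->irq, qla2100_intr_handler,
            IRQF_SHARED, "qla2xxx (example)", rsp);
        if (ret)
                ql_log(ql_log_warn, NULL, 0x0000,
                    "Example: request_irq() failed (%d).\n", ret);
        return ret;
}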
113/**
114 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 115 * @irq: interrupt number
116 * @dev_id: SCSI driver HA context
1da177e4
LT
117 *
118 * Called by system whenever the host adapter generates an interrupt.
119 *
120 * Returns handled flag.
121 */
122irqreturn_t
7d12e780 123qla2300_intr_handler(int irq, void *dev_id)
1da177e4 124{
e315cd28 125 scsi_qla_host_t *vha;
3d71644c 126 struct device_reg_2xxx __iomem *reg;
1da177e4 127 int status;
1da177e4
LT
128 unsigned long iter;
129 uint32_t stat;
1da177e4 130 uint16_t hccr;
9a853f71 131 uint16_t mb[4];
e315cd28
AC
132 struct rsp_que *rsp;
133 struct qla_hw_data *ha;
43fac4d9 134 unsigned long flags;
1da177e4 135
e315cd28
AC
136 rsp = (struct rsp_que *) dev_id;
137 if (!rsp) {
3256b435
CD
138 ql_log(ql_log_info, NULL, 0x5058,
139 "%s: NULL response queue pointer.\n", __func__);
1da177e4
LT
140 return (IRQ_NONE);
141 }
142
e315cd28 143 ha = rsp->hw;
3d71644c 144 reg = &ha->iobase->isp;
1da177e4
LT
145 status = 0;
146
43fac4d9 147 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 148 vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
149 for (iter = 50; iter--; ) {
150 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
151 if (stat & HSR_RISC_PAUSED) {
85880801 152 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
153 break;
154
1da177e4
LT
155 hccr = RD_REG_WORD(&reg->hccr);
156 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
7c3df132
SK
157 ql_log(ql_log_warn, vha, 0x5026,
158 "Parity error -- HCCR=%x, Dumping "
159 "firmware.\n", hccr);
1da177e4 160 else
7c3df132
SK
161 ql_log(ql_log_warn, vha, 0x5027,
162 "RISC paused -- HCCR=%x, Dumping "
163 "firmware.\n", hccr);
1da177e4
LT
164
165 /*
166 * Issue a "HARD" reset in order for the RISC
167 * interrupt bit to be cleared. Schedule a big
a06a0f8e 168 * hammer to get out of the RISC PAUSED state.
1da177e4
LT
169 */
170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171 RD_REG_WORD(&reg->hccr);
07f31805 172
e315cd28
AC
173 ha->isp_ops->fw_dump(vha, 1);
174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
175 break;
176 } else if ((stat & HSR_RISC_INT) == 0)
177 break;
178
1da177e4 179 switch (stat & 0xff) {
1da177e4
LT
180 case 0x1:
181 case 0x2:
182 case 0x10:
183 case 0x11:
e315cd28 184 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
185 status |= MBX_INTERRUPT;
186
187 /* Release mailbox registers. */
188 WRT_REG_WORD(&reg->semaphore, 0);
189 break;
190 case 0x12:
9a853f71
AV
191 mb[0] = MSW(stat);
192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 195 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
196 break;
197 case 0x13:
73208dfd 198 qla2x00_process_response_queue(rsp);
1da177e4
LT
199 break;
200 case 0x15:
9a853f71
AV
201 mb[0] = MBA_CMPLT_1_16BIT;
202 mb[1] = MSW(stat);
73208dfd 203 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
204 break;
205 case 0x16:
9a853f71
AV
206 mb[0] = MBA_SCSI_COMPLETION;
207 mb[1] = MSW(stat);
208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 209 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
210 break;
211 default:
7c3df132
SK
212 ql_dbg(ql_dbg_async, vha, 0x5028,
213 "Unrecognized interrupt type (%d).\n", stat & 0xff);
1da177e4
LT
214 break;
215 }
216 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
217 RD_REG_WORD_RELAXED(&reg->hccr);
218 }
36439832 219 qla2x00_handle_mbx_completion(ha, status);
43fac4d9 220 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 221
1da177e4
LT
222 return (IRQ_HANDLED);
223}
224
225/**
226 * qla2x00_mbx_completion() - Process mailbox command completions.
 227 * @vha: SCSI driver HA context
228 * @mb0: Mailbox0 register
229 */
230static void
e315cd28 231qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
232{
233 uint16_t cnt;
4fa94f83 234 uint32_t mboxes;
1da177e4 235 uint16_t __iomem *wptr;
e315cd28 236 struct qla_hw_data *ha = vha->hw;
3d71644c 237 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 238
4fa94f83
AV
239 /* Read all mbox registers? */
240 mboxes = (1 << ha->mbx_count) - 1;
241 if (!ha->mcp)
a720101d 242 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
4fa94f83
AV
243 else
244 mboxes = ha->mcp->in_mb;
245
1da177e4
LT
246 /* Load return mailbox registers. */
247 ha->flags.mbox_int = 1;
248 ha->mailbox_out[0] = mb0;
4fa94f83 249 mboxes >>= 1;
1da177e4
LT
250 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
251
252 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 253 if (IS_QLA2200(ha) && cnt == 8)
1da177e4 254 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
4fa94f83 255 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
1da177e4 256 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
4fa94f83 257 else if (mboxes & BIT_0)
1da177e4 258 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 259
1da177e4 260 wptr++;
4fa94f83 261 mboxes >>= 1;
1da177e4 262 }
1da177e4
LT
263}
264
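/*
 * Worked example (illustration only) of the in_mb bitmask walk above: if
 * the caller's mcp->in_mb is MBX_0|MBX_1|MBX_2 (0x7), then after mailbox 0
 * is stored the mask is shifted down to 0x3, so the loop reads mailbox
 * registers 1 and 2 into ha->mailbox_out[] and skips the remaining
 * registers, shifting the mask once per iteration.
 */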
8a659571
AV
265static void
266qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
267{
268 static char *event[] =
269 { "Complete", "Request Notification", "Time Extension" };
270 int rval;
271 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
272 uint16_t __iomem *wptr;
273 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
274
275 /* Seed data -- mailbox1 -> mailbox7. */
276 wptr = (uint16_t __iomem *)&reg24->mailbox1;
277 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
278 mb[cnt] = RD_REG_WORD(wptr);
279
7c3df132 280 ql_dbg(ql_dbg_async, vha, 0x5021,
6246b8a1 281 "Inter-Driver Communication %s -- "
7c3df132
SK
282 "%04x %04x %04x %04x %04x %04x %04x.\n",
283 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
284 mb[4], mb[5], mb[6]);
bf5b8ad7
CD
285 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
286 vha->hw->flags.idc_compl_status = 1;
287 if (vha->hw->notify_dcbx_comp)
288 complete(&vha->hw->dcbx_comp);
289 }
8a659571 290
bf5b8ad7
CD
291 /* Acknowledgement needed? [Notify && non-zero timeout]. */
292 timeout = (descr >> 8) & 0xf;
293 if (aen != MBA_IDC_NOTIFY || !timeout)
294 return;
8fcd6b8b 295
bf5b8ad7
CD
296 ql_dbg(ql_dbg_async, vha, 0x5022,
297 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
298 vha->host_no, event[aen & 0xff], timeout);
8a659571
AV
299
300 rval = qla2x00_post_idc_ack_work(vha, mb);
301 if (rval != QLA_SUCCESS)
7c3df132 302 ql_log(ql_log_warn, vha, 0x5023,
8a659571
AV
303 "IDC failed to post ACK.\n");
304}
305
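/*
 * Worked example (illustration only) of the ACK decision above: for an
 * MBA_IDC_NOTIFY event with descr = 0x0a01 the timeout nibble is
 * (0x0a01 >> 8) & 0xf = 0xa, which is non-zero, so an IDC ACK work item is
 * posted; a zero timeout, or an MBA_IDC_COMPLETE/MBA_IDC_TIME_EXT event,
 * returns without acknowledging.
 */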
daae62a3 306#define LS_UNKNOWN 2
d0297c9a
JC
307const char *
308qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
daae62a3 309{
d0297c9a
JC
310 static const char * const link_speeds[] = {
311 "1", "2", "?", "4", "8", "16", "10"
312 };
daae62a3
CD
313
314 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d0297c9a
JC
315 return link_speeds[0];
316 else if (speed == 0x13)
317 return link_speeds[6];
318 else if (speed < 6)
319 return link_speeds[speed];
320 else
321 return link_speeds[LS_UNKNOWN];
daae62a3
CD
322}
323
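/*
 * Illustrative sketch only: how the speed code carried in mb[1] of a
 * loop-up asynchronous event maps to the table above -- a raw code of
 * 0x13 is reported as "10", a code of 3 as "4".  The helper below is
 * hypothetical glue for demonstration (the 0x0000 message id is a
 * placeholder); the real use is in the MBA_LOOP_UP handling in
 * qla2x00_async_event().
 */
static void example_log_link_rate(scsi_qla_host_t *vha, uint16_t mb1)
{
        ql_dbg(ql_dbg_async, vha, 0x0000,
            "Link rate %s Gbps (raw code 0x%x).\n",
            qla2x00_get_link_speed_str(vha->hw, mb1), mb1);
}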
fa492630 324static void
7d613ac6
SV
325qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
326{
327 struct qla_hw_data *ha = vha->hw;
328
329 /*
330 * 8200 AEN Interpretation:
331 * mb[0] = AEN code
332 * mb[1] = AEN Reason code
333 * mb[2] = LSW of Peg-Halt Status-1 Register
334 * mb[6] = MSW of Peg-Halt Status-1 Register
335 * mb[3] = LSW of Peg-Halt Status-2 register
336 * mb[7] = MSW of Peg-Halt Status-2 register
337 * mb[4] = IDC Device-State Register value
338 * mb[5] = IDC Driver-Presence Register value
339 */
340 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
341 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
342 mb[0], mb[1], mb[2], mb[6]);
343 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
344 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
345 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
346
347 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
348 IDC_HEARTBEAT_FAILURE)) {
349 ha->flags.nic_core_hung = 1;
350 ql_log(ql_log_warn, vha, 0x5060,
351 "83XX: F/W Error Reported: Check if reset required.\n");
352
353 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
354 uint32_t protocol_engine_id, fw_err_code, err_level;
355
356 /*
357 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
358 * - PEG-Halt Status-1 Register:
359 * (LSW = mb[2], MSW = mb[6])
360 * Bits 0-7 = protocol-engine ID
361 * Bits 8-28 = f/w error code
362 * Bits 29-31 = Error-level
363 * Error-level 0x1 = Non-Fatal error
364 * Error-level 0x2 = Recoverable Fatal error
365 * Error-level 0x4 = UnRecoverable Fatal error
366 * - PEG-Halt Status-2 Register:
367 * (LSW = mb[3], MSW = mb[7])
368 */
369 protocol_engine_id = (mb[2] & 0xff);
370 fw_err_code = (((mb[2] & 0xff00) >> 8) |
371 ((mb[6] & 0x1fff) << 8));
372 err_level = ((mb[6] & 0xe000) >> 13);
373 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
374 "Register: protocol_engine_id=0x%x "
375 "fw_err_code=0x%x err_level=0x%x.\n",
376 protocol_engine_id, fw_err_code, err_level);
377 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
378 "Register: 0x%x%x.\n", mb[7], mb[3]);
379 if (err_level == ERR_LEVEL_NON_FATAL) {
380 ql_log(ql_log_warn, vha, 0x5063,
381 "Not a fatal error, f/w has recovered "
 382 "itself.\n");
383 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
384 ql_log(ql_log_fatal, vha, 0x5064,
385 "Recoverable Fatal error: Chip reset "
386 "required.\n");
387 qla83xx_schedule_work(vha,
388 QLA83XX_NIC_CORE_RESET);
389 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
390 ql_log(ql_log_fatal, vha, 0x5065,
391 "Unrecoverable Fatal error: Set FAILED "
392 "state, reboot required.\n");
393 qla83xx_schedule_work(vha,
394 QLA83XX_NIC_CORE_UNRECOVERABLE);
395 }
396 }
397
398 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
399 uint16_t peg_fw_state, nw_interface_link_up;
400 uint16_t nw_interface_signal_detect, sfp_status;
401 uint16_t htbt_counter, htbt_monitor_enable;
 402 uint16_t sfp_additional_info, sfp_multirate;
403 uint16_t sfp_tx_fault, link_speed, dcbx_status;
404
405 /*
406 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
407 * - PEG-to-FC Status Register:
408 * (LSW = mb[2], MSW = mb[6])
409 * Bits 0-7 = Peg-Firmware state
410 * Bit 8 = N/W Interface Link-up
411 * Bit 9 = N/W Interface signal detected
412 * Bits 10-11 = SFP Status
413 * SFP Status 0x0 = SFP+ transceiver not expected
414 * SFP Status 0x1 = SFP+ transceiver not present
415 * SFP Status 0x2 = SFP+ transceiver invalid
416 * SFP Status 0x3 = SFP+ transceiver present and
417 * valid
418 * Bits 12-14 = Heartbeat Counter
419 * Bit 15 = Heartbeat Monitor Enable
420 * Bits 16-17 = SFP Additional Info
 421 * SFP info 0x0 = Unrecognized transceiver for
422 * Ethernet
423 * SFP info 0x1 = SFP+ brand validation failed
424 * SFP info 0x2 = SFP+ speed validation failed
425 * SFP info 0x3 = SFP+ access error
426 * Bit 18 = SFP Multirate
427 * Bit 19 = SFP Tx Fault
428 * Bits 20-22 = Link Speed
429 * Bits 23-27 = Reserved
430 * Bits 28-30 = DCBX Status
431 * DCBX Status 0x0 = DCBX Disabled
432 * DCBX Status 0x1 = DCBX Enabled
433 * DCBX Status 0x2 = DCBX Exchange error
434 * Bit 31 = Reserved
435 */
436 peg_fw_state = (mb[2] & 0x00ff);
437 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
438 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
439 sfp_status = ((mb[2] & 0x0c00) >> 10);
440 htbt_counter = ((mb[2] & 0x7000) >> 12);
441 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
 442 sfp_additional_info = (mb[6] & 0x0003);
443 sfp_multirate = ((mb[6] & 0x0004) >> 2);
444 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
445 link_speed = ((mb[6] & 0x0070) >> 4);
446 dcbx_status = ((mb[6] & 0x7000) >> 12);
447
448 ql_log(ql_log_warn, vha, 0x5066,
449 "Peg-to-Fc Status Register:\n"
450 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
451 "nw_interface_signal_detect=0x%x"
 452 "\nsfp_status=0x%x.\n", peg_fw_state,
453 nw_interface_link_up, nw_interface_signal_detect,
454 sfp_status);
455 ql_log(ql_log_warn, vha, 0x5067,
456 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
 457 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
458 htbt_counter, htbt_monitor_enable,
 459 sfp_additional_info, sfp_multirate);
460 ql_log(ql_log_warn, vha, 0x5068,
 461 "sfp_tx_fault=0x%x, link_speed=0x%x, "
462 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
463 dcbx_status);
464
465 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
466 }
467
468 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
469 ql_log(ql_log_warn, vha, 0x5069,
470 "Heartbeat Failure encountered, chip reset "
471 "required.\n");
472
473 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
474 }
475 }
476
477 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
478 ql_log(ql_log_info, vha, 0x506a,
479 "IDC Device-State changed = 0x%x.\n", mb[4]);
6c3943cd
SK
480 if (ha->flags.nic_core_reset_owner)
481 return;
7d613ac6
SV
482 qla83xx_schedule_work(vha, MBA_IDC_AEN);
483 }
484}
485
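/*
 * Worked example (illustration only) of the PEG-Halt Status-1 decode used
 * above.  Status-1 is split across two mailboxes: LSW in mb[2], MSW in
 * mb[6].  With the made-up values mb[2] = 0x3401 and mb[6] = 0x4123:
 *
 *   protocol_engine_id = mb[2] & 0xff                          = 0x01
 *   fw_err_code        = ((mb[2] & 0xff00) >> 8) |
 *                        ((mb[6] & 0x1fff) << 8)               = 0x12334
 *   err_level          = (mb[6] & 0xe000) >> 13                = 0x2
 *
 * err_level 0x2 is ERR_LEVEL_RECOVERABLE_FATAL, so the handler above would
 * schedule QLA83XX_NIC_CORE_RESET.
 */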
bb4cf5b7
CD
486int
487qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
488{
489 struct qla_hw_data *ha = vha->hw;
490 scsi_qla_host_t *vp;
491 uint32_t vp_did;
492 unsigned long flags;
493 int ret = 0;
494
495 if (!ha->num_vhosts)
496 return ret;
497
498 spin_lock_irqsave(&ha->vport_slock, flags);
499 list_for_each_entry(vp, &ha->vp_list, list) {
500 vp_did = vp->d_id.b24;
501 if (vp_did == rscn_entry) {
502 ret = 1;
503 break;
504 }
505 }
506 spin_unlock_irqrestore(&ha->vport_slock, flags);
507
508 return ret;
509}
510
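/*
 * Illustrative sketch only: how the affected-port D_ID of an RSCN event is
 * assembled from the event mailboxes before being handed to the helper
 * above.  The MBA_RSCN_UPDATE case below does the same thing inline --
 * mb[1] carries the domain (plus RSCN qualifier bits), mb[2] carries the
 * area/al_pa, and bits above bit 9 of mb[1] are reserved and masked off.
 */
static int example_rscn_is_vp(scsi_qla_host_t *vha, uint16_t *mb)
{
        uint32_t rscn_entry;

        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

        return qla2x00_is_a_vp_did(vha, rscn_entry);
}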
1da177e4
LT
511/**
 512 * qla2x00_async_event() - Process asynchronous events.
 513 * @vha: SCSI driver HA context
9a853f71 514 * @mb: Mailbox registers (0 - 3)
1da177e4 515 */
2c3dfe3f 516void
73208dfd 517qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 518{
1da177e4 519 uint16_t handle_cnt;
bdab23da 520 uint16_t cnt, mbx;
1da177e4 521 uint32_t handles[5];
e315cd28 522 struct qla_hw_data *ha = vha->hw;
3d71644c 523 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
bdab23da 524 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
bc5c2aad 525 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1da177e4 526 uint32_t rscn_entry, host_pid;
4d4df193 527 unsigned long flags;
1da177e4
LT
528
529 /* Setup to process RIO completion. */
530 handle_cnt = 0;
6246b8a1 531 if (IS_CNA_CAPABLE(ha))
3a03eb79 532 goto skip_rio;
1da177e4
LT
533 switch (mb[0]) {
534 case MBA_SCSI_COMPLETION:
9a853f71 535 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
536 handle_cnt = 1;
537 break;
538 case MBA_CMPLT_1_16BIT:
9a853f71 539 handles[0] = mb[1];
1da177e4
LT
540 handle_cnt = 1;
541 mb[0] = MBA_SCSI_COMPLETION;
542 break;
543 case MBA_CMPLT_2_16BIT:
9a853f71
AV
544 handles[0] = mb[1];
545 handles[1] = mb[2];
1da177e4
LT
546 handle_cnt = 2;
547 mb[0] = MBA_SCSI_COMPLETION;
548 break;
549 case MBA_CMPLT_3_16BIT:
9a853f71
AV
550 handles[0] = mb[1];
551 handles[1] = mb[2];
552 handles[2] = mb[3];
1da177e4
LT
553 handle_cnt = 3;
554 mb[0] = MBA_SCSI_COMPLETION;
555 break;
556 case MBA_CMPLT_4_16BIT:
9a853f71
AV
557 handles[0] = mb[1];
558 handles[1] = mb[2];
559 handles[2] = mb[3];
1da177e4
LT
560 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
561 handle_cnt = 4;
562 mb[0] = MBA_SCSI_COMPLETION;
563 break;
564 case MBA_CMPLT_5_16BIT:
9a853f71
AV
565 handles[0] = mb[1];
566 handles[1] = mb[2];
567 handles[2] = mb[3];
1da177e4
LT
568 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
569 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
570 handle_cnt = 5;
571 mb[0] = MBA_SCSI_COMPLETION;
572 break;
573 case MBA_CMPLT_2_32BIT:
9a853f71 574 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
575 handles[1] = le32_to_cpu(
576 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
577 RD_MAILBOX_REG(ha, reg, 6));
578 handle_cnt = 2;
579 mb[0] = MBA_SCSI_COMPLETION;
580 break;
581 default:
582 break;
583 }
3a03eb79 584skip_rio:
1da177e4
LT
585 switch (mb[0]) {
586 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 587 if (!vha->flags.online)
1da177e4
LT
588 break;
589
590 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
591 qla2x00_process_completed_request(vha, rsp->req,
592 handles[cnt]);
1da177e4
LT
593 break;
594
595 case MBA_RESET: /* Reset */
7c3df132
SK
596 ql_dbg(ql_dbg_async, vha, 0x5002,
597 "Asynchronous RESET.\n");
1da177e4 598
e315cd28 599 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
600 break;
601
602 case MBA_SYSTEM_ERR: /* System Error */
6246b8a1
GM
603 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
604 RD_REG_WORD(&reg24->mailbox7) : 0;
7c3df132 605 ql_log(ql_log_warn, vha, 0x5003,
bdab23da
AV
606 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
607 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
1da177e4 608
e315cd28 609 ha->isp_ops->fw_dump(vha, 1);
1da177e4 610
e428924c 611 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 612 if (mb[1] == 0 && mb[2] == 0) {
7c3df132 613 ql_log(ql_log_fatal, vha, 0x5004,
9a853f71
AV
614 "Unrecoverable Hardware Error: adapter "
615 "marked OFFLINE!\n");
e315cd28 616 vha->flags.online = 0;
6246b8a1 617 vha->device_flags |= DFLG_DEV_FAILED;
b1d46989 618 } else {
25985edc 619 /* Check to see if MPI timeout occurred */
b1d46989
MI
620 if ((mbx & MBX_3) && (ha->flags.port0))
621 set_bit(MPI_RESET_NEEDED,
622 &vha->dpc_flags);
623
e315cd28 624 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
b1d46989 625 }
9a853f71 626 } else if (mb[1] == 0) {
7c3df132 627 ql_log(ql_log_fatal, vha, 0x5005,
1da177e4
LT
628 "Unrecoverable Hardware Error: adapter marked "
629 "OFFLINE!\n");
e315cd28 630 vha->flags.online = 0;
6246b8a1 631 vha->device_flags |= DFLG_DEV_FAILED;
1da177e4 632 } else
e315cd28 633 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
634 break;
635
636 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7c3df132
SK
637 ql_log(ql_log_warn, vha, 0x5006,
638 "ISP Request Transfer Error (%x).\n", mb[1]);
1da177e4 639
e315cd28 640 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
641 break;
642
643 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7c3df132
SK
644 ql_log(ql_log_warn, vha, 0x5007,
645 "ISP Response Transfer Error.\n");
1da177e4 646
e315cd28 647 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
648 break;
649
650 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
7c3df132
SK
651 ql_dbg(ql_dbg_async, vha, 0x5008,
652 "Asynchronous WAKEUP_THRES.\n");
1da177e4 653
2d70c103 654 break;
1da177e4 655 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
cfb0919c 656 ql_dbg(ql_dbg_async, vha, 0x5009,
7c3df132 657 "LIP occurred (%x).\n", mb[1]);
1da177e4 658
e315cd28
AC
659 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
660 atomic_set(&vha->loop_state, LOOP_DOWN);
661 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
662 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
663 }
664
e315cd28
AC
665 if (vha->vp_idx) {
666 atomic_set(&vha->vp_state, VP_FAILED);
667 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
668 }
669
e315cd28
AC
670 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
671 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 672
e315cd28
AC
673 vha->flags.management_server_logged_in = 0;
674 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
675 break;
676
677 case MBA_LOOP_UP: /* Loop Up Event */
daae62a3 678 if (IS_QLA2100(ha) || IS_QLA2200(ha))
d8b45213 679 ha->link_data_rate = PORT_SPEED_1GB;
daae62a3 680 else
1da177e4 681 ha->link_data_rate = mb[1];
1da177e4 682
cfb0919c 683 ql_dbg(ql_dbg_async, vha, 0x500a,
daae62a3 684 "LOOP UP detected (%s Gbps).\n",
d0297c9a 685 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1da177e4 686
e315cd28
AC
687 vha->flags.management_server_logged_in = 0;
688 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
689 break;
690
691 case MBA_LOOP_DOWN: /* Loop Down Event */
6246b8a1
GM
692 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
693 ? RD_REG_WORD(&reg24->mailbox4) : 0;
bc5c2aad 694 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
cfb0919c 695 ql_dbg(ql_dbg_async, vha, 0x500b,
7c3df132
SK
696 "LOOP DOWN detected (%x %x %x %x).\n",
697 mb[1], mb[2], mb[3], mbx);
1da177e4 698
e315cd28
AC
699 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
700 atomic_set(&vha->loop_state, LOOP_DOWN);
701 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
702 vha->device_flags |= DFLG_NO_CABLE;
703 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
704 }
705
e315cd28
AC
706 if (vha->vp_idx) {
707 atomic_set(&vha->vp_state, VP_FAILED);
708 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
709 }
710
e315cd28 711 vha->flags.management_server_logged_in = 0;
d8b45213 712 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 713 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
714 break;
715
716 case MBA_LIP_RESET: /* LIP reset occurred */
cfb0919c 717 ql_dbg(ql_dbg_async, vha, 0x500c,
cc3ef7bc 718 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 719
e315cd28
AC
720 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
721 atomic_set(&vha->loop_state, LOOP_DOWN);
722 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
723 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
724 }
725
e315cd28
AC
726 if (vha->vp_idx) {
727 atomic_set(&vha->vp_state, VP_FAILED);
728 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
729 }
730
e315cd28 731 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
732
733 ha->operating_mode = LOOP;
e315cd28
AC
734 vha->flags.management_server_logged_in = 0;
735 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
736 break;
737
3a03eb79 738 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
739 case MBA_POINT_TO_POINT: /* Point-to-Point */
740 if (IS_QLA2100(ha))
741 break;
742
6246b8a1 743 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
7c3df132
SK
744 ql_dbg(ql_dbg_async, vha, 0x500d,
745 "DCBX Completed -- %04x %04x %04x.\n",
746 mb[1], mb[2], mb[3]);
23f2ebd1
SR
747 if (ha->notify_dcbx_comp)
748 complete(&ha->dcbx_comp);
749
750 } else
7c3df132
SK
751 ql_dbg(ql_dbg_async, vha, 0x500e,
752 "Asynchronous P2P MODE received.\n");
1da177e4
LT
753
754 /*
755 * Until there's a transition from loop down to loop up, treat
756 * this as loop down only.
757 */
e315cd28
AC
758 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
759 atomic_set(&vha->loop_state, LOOP_DOWN);
760 if (!atomic_read(&vha->loop_down_timer))
761 atomic_set(&vha->loop_down_timer,
1da177e4 762 LOOP_DOWN_TIME);
e315cd28 763 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
764 }
765
e315cd28
AC
766 if (vha->vp_idx) {
767 atomic_set(&vha->vp_state, VP_FAILED);
768 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
769 }
770
e315cd28
AC
771 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
772 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
773
774 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
775 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
776
777 ha->flags.gpsc_supported = 1;
e315cd28 778 vha->flags.management_server_logged_in = 0;
1da177e4
LT
779 break;
780
781 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
782 if (IS_QLA2100(ha))
783 break;
784
cfb0919c 785 ql_dbg(ql_dbg_async, vha, 0x500f,
1da177e4
LT
786 "Configuration change detected: value=%x.\n", mb[1]);
787
e315cd28
AC
788 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
789 atomic_set(&vha->loop_state, LOOP_DOWN);
790 if (!atomic_read(&vha->loop_down_timer))
791 atomic_set(&vha->loop_down_timer,
1da177e4 792 LOOP_DOWN_TIME);
e315cd28 793 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
794 }
795
e315cd28
AC
796 if (vha->vp_idx) {
797 atomic_set(&vha->vp_state, VP_FAILED);
798 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
799 }
800
e315cd28
AC
801 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
802 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
803 break;
804
805 case MBA_PORT_UPDATE: /* Port database update */
55903b9d
SV
806 /*
807 * Handle only global and vn-port update events
808 *
809 * Relevant inputs:
810 * mb[1] = N_Port handle of changed port
811 * OR 0xffff for global event
812 * mb[2] = New login state
813 * 7 = Port logged out
814 * mb[3] = LSB is vp_idx, 0xff = all vps
815 *
816 * Skip processing if:
817 * Event is global, vp_idx is NOT all vps,
818 * vp_idx does not match
819 * Event is not global, vp_idx does not match
820 */
12cec63e
AV
821 if (IS_QLA2XXX_MIDTYPE(ha) &&
822 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
823 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
824 break;
73208dfd 825
9764ff88
AV
826 /* Global event -- port logout or port unavailable. */
827 if (mb[1] == 0xffff && mb[2] == 0x7) {
7c3df132
SK
828 ql_dbg(ql_dbg_async, vha, 0x5010,
829 "Port unavailable %04x %04x %04x.\n",
830 mb[1], mb[2], mb[3]);
daae62a3
CD
831 ql_log(ql_log_warn, vha, 0x505e,
832 "Link is offline.\n");
9764ff88
AV
833
834 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
835 atomic_set(&vha->loop_state, LOOP_DOWN);
836 atomic_set(&vha->loop_down_timer,
837 LOOP_DOWN_TIME);
838 vha->device_flags |= DFLG_NO_CABLE;
839 qla2x00_mark_all_devices_lost(vha, 1);
840 }
841
842 if (vha->vp_idx) {
843 atomic_set(&vha->vp_state, VP_FAILED);
844 fc_vport_set_state(vha->fc_vport,
845 FC_VPORT_FAILED);
faadc5e7 846 qla2x00_mark_all_devices_lost(vha, 1);
9764ff88
AV
847 }
848
849 vha->flags.management_server_logged_in = 0;
850 ha->link_data_rate = PORT_SPEED_UNKNOWN;
851 break;
852 }
853
1da177e4 854 /*
cc3ef7bc 855 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1da177e4
LT
856 * event etc. earlier indicating loop is down) then process
 857 * it. Otherwise ignore it and wait for RSCN to come in.
858 */
e315cd28 859 atomic_set(&vha->loop_down_timer, 0);
79cc785f 860 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
7c3df132
SK
861 ql_dbg(ql_dbg_async, vha, 0x5011,
862 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
863 mb[1], mb[2], mb[3]);
2d70c103
NB
864
865 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
866 break;
867 }
868
7c3df132
SK
869 ql_dbg(ql_dbg_async, vha, 0x5012,
870 "Port database changed %04x %04x %04x.\n",
871 mb[1], mb[2], mb[3]);
daae62a3
CD
872 ql_log(ql_log_warn, vha, 0x505f,
873 "Link is operational (%s Gbps).\n",
d0297c9a 874 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1da177e4
LT
875
876 /*
877 * Mark all devices as missing so we will login again.
878 */
e315cd28 879 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 880
e315cd28 881 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 882
2d70c103
NB
883 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
884 set_bit(SCR_PENDING, &vha->dpc_flags);
885
e315cd28
AC
886 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
887 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2d70c103
NB
888
889 qlt_async_event(mb[0], vha, mb);
1da177e4
LT
890 break;
891
892 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 893 /* Check if the Vport has issued a SCR */
e315cd28 894 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
895 break;
896 /* Only handle SCNs for our Vport index. */
0d6e61bc 897 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
3c397400 898 break;
0d6e61bc 899
7c3df132
SK
900 ql_dbg(ql_dbg_async, vha, 0x5013,
901 "RSCN database changed -- %04x %04x %04x.\n",
902 mb[1], mb[2], mb[3]);
1da177e4 903
59d72d87 904 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
905 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
906 | vha->d_id.b.al_pa;
1da177e4 907 if (rscn_entry == host_pid) {
7c3df132
SK
908 ql_dbg(ql_dbg_async, vha, 0x5014,
909 "Ignoring RSCN update to local host "
910 "port ID (%06x).\n", host_pid);
1da177e4
LT
911 break;
912 }
913
59d72d87
RA
914 /* Ignore reserved bits from RSCN-payload. */
915 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1da177e4 916
bb4cf5b7
CD
917 /* Skip RSCNs for virtual ports on the same physical port */
918 if (qla2x00_is_a_vp_did(vha, rscn_entry))
919 break;
920
e315cd28
AC
921 atomic_set(&vha->loop_down_timer, 0);
922 vha->flags.management_server_logged_in = 0;
1da177e4 923
e315cd28
AC
924 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
925 set_bit(RSCN_UPDATE, &vha->dpc_flags);
926 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
927 break;
928
929 /* case MBA_RIO_RESPONSE: */
930 case MBA_ZIO_RESPONSE:
7c3df132
SK
931 ql_dbg(ql_dbg_async, vha, 0x5015,
932 "[R|Z]IO update completion.\n");
1da177e4 933
e428924c 934 if (IS_FWI2_CAPABLE(ha))
2afa19a9 935 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 936 else
73208dfd 937 qla2x00_process_response_queue(rsp);
1da177e4 938 break;
9a853f71
AV
939
940 case MBA_DISCARD_RND_FRAME:
7c3df132
SK
941 ql_dbg(ql_dbg_async, vha, 0x5016,
942 "Discard RND Frame -- %04x %04x %04x.\n",
943 mb[1], mb[2], mb[3]);
9a853f71 944 break;
45ebeb56
AV
945
946 case MBA_TRACE_NOTIFICATION:
7c3df132
SK
947 ql_dbg(ql_dbg_async, vha, 0x5017,
948 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
45ebeb56 949 break;
4d4df193
HK
950
951 case MBA_ISP84XX_ALERT:
7c3df132
SK
952 ql_dbg(ql_dbg_async, vha, 0x5018,
953 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
954 mb[1], mb[2], mb[3]);
4d4df193
HK
955
956 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
957 switch (mb[1]) {
958 case A84_PANIC_RECOVERY:
7c3df132
SK
959 ql_log(ql_log_info, vha, 0x5019,
960 "Alert 84XX: panic recovery %04x %04x.\n",
961 mb[2], mb[3]);
4d4df193
HK
962 break;
963 case A84_OP_LOGIN_COMPLETE:
964 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
965 ql_log(ql_log_info, vha, 0x501a,
966 "Alert 84XX: firmware version %x.\n",
967 ha->cs84xx->op_fw_version);
4d4df193
HK
968 break;
969 case A84_DIAG_LOGIN_COMPLETE:
970 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
971 ql_log(ql_log_info, vha, 0x501b,
972 "Alert 84XX: diagnostic firmware version %x.\n",
973 ha->cs84xx->diag_fw_version);
4d4df193
HK
974 break;
975 case A84_GOLD_LOGIN_COMPLETE:
 976 ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
977 ha->cs84xx->fw_update = 1;
7c3df132
SK
978 ql_log(ql_log_info, vha, 0x501c,
979 "Alert 84XX: gold firmware version %x.\n",
980 ha->cs84xx->gold_fw_version);
4d4df193
HK
981 break;
982 default:
7c3df132
SK
983 ql_log(ql_log_warn, vha, 0x501d,
984 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
4d4df193
HK
985 mb[1], mb[2], mb[3]);
986 }
987 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
988 break;
3a03eb79 989 case MBA_DCBX_START:
7c3df132
SK
990 ql_dbg(ql_dbg_async, vha, 0x501e,
991 "DCBX Started -- %04x %04x %04x.\n",
992 mb[1], mb[2], mb[3]);
3a03eb79
AV
993 break;
994 case MBA_DCBX_PARAM_UPDATE:
7c3df132
SK
995 ql_dbg(ql_dbg_async, vha, 0x501f,
996 "DCBX Parameters Updated -- %04x %04x %04x.\n",
997 mb[1], mb[2], mb[3]);
3a03eb79
AV
998 break;
999 case MBA_FCF_CONF_ERR:
7c3df132
SK
1000 ql_dbg(ql_dbg_async, vha, 0x5020,
1001 "FCF Configuration Error -- %04x %04x %04x.\n",
1002 mb[1], mb[2], mb[3]);
3a03eb79 1003 break;
3a03eb79 1004 case MBA_IDC_NOTIFY:
67b2a31f
CD
1005 if (IS_QLA8031(vha->hw)) {
1006 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1007 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1008 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1009 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
8fcd6b8b 1010 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
67b2a31f
CD
1011 /*
1012 * Extend loop down timer since port is active.
1013 */
1014 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1015 atomic_set(&vha->loop_down_timer,
1016 LOOP_DOWN_TIME);
8fcd6b8b
CD
1017 qla2xxx_wake_dpc(vha);
1018 }
67b2a31f 1019 }
8fcd6b8b 1020 case MBA_IDC_COMPLETE:
f356bef1
CD
1021 if (ha->notify_lb_portup_comp)
1022 complete(&ha->lb_portup_comp);
1023 /* Fallthru */
3a03eb79 1024 case MBA_IDC_TIME_EXT:
bf5b8ad7 1025 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
7d613ac6
SV
1026 qla81xx_idc_event(vha, mb[0], mb[1]);
1027 break;
1028
1029 case MBA_IDC_AEN:
1030 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1031 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1032 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1033 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1034 qla83xx_handle_8200_aen(vha, mb);
3a03eb79 1035 break;
7d613ac6 1036
6246b8a1
GM
1037 default:
1038 ql_dbg(ql_dbg_async, vha, 0x5057,
1039 "Unknown AEN:%04x %04x %04x %04x\n",
1040 mb[0], mb[1], mb[2], mb[3]);
1da177e4 1041 }
2c3dfe3f 1042
2d70c103
NB
1043 qlt_async_event(mb[0], vha, mb);
1044
e315cd28 1045 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 1046 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
1047}
1048
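/*
 * Illustrative sketch only: the fast-post (RIO) cases at the top of
 * qla2x00_async_event() rebuild 32-bit command handles from pairs of
 * 16-bit mailboxes.  For MBA_SCSI_COMPLETION the low word is mb[1] and the
 * high word is mb[2], so mb[1] = 0x0005 and mb[2] = 0x0001 yield handle
 * 0x00010005, which is later used to index req->outstanding_cmds[] in
 * qla2x00_process_completed_request().
 */
static inline uint32_t example_rio_handle(uint16_t lo, uint16_t hi)
{
        return ((uint32_t)hi << 16) | lo;
}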
1049/**
1050 * qla2x00_process_completed_request() - Process a Fast Post response.
 1051 * @vha: SCSI driver HA context
1052 * @index: SRB index
1053 */
8ae6d9c7 1054void
73208dfd 1055qla2x00_process_completed_request(struct scsi_qla_host *vha,
8ae6d9c7 1056 struct req_que *req, uint32_t index)
1da177e4
LT
1057{
1058 srb_t *sp;
e315cd28 1059 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1060
1061 /* Validate handle. */
8d93f550 1062 if (index >= req->num_outstanding_cmds) {
7c3df132
SK
1063 ql_log(ql_log_warn, vha, 0x3014,
1064 "Invalid SCSI command index (%x).\n", index);
1da177e4 1065
8f7daead
GM
1066 if (IS_QLA82XX(ha))
1067 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1068 else
1069 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1070 return;
1071 }
1072
e315cd28 1073 sp = req->outstanding_cmds[index];
1da177e4
LT
1074 if (sp) {
1075 /* Free outstanding command slot. */
e315cd28 1076 req->outstanding_cmds[index] = NULL;
1da177e4 1077
1da177e4 1078 /* Save ISP completion status */
9ba56b95 1079 sp->done(ha, sp, DID_OK << 16);
1da177e4 1080 } else {
7c3df132 1081 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1da177e4 1082
8f7daead
GM
1083 if (IS_QLA82XX(ha))
1084 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1085 else
1086 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
1087 }
1088}
1089
8ae6d9c7 1090srb_t *
ac280b67
AV
1091qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1092 struct req_que *req, void *iocb)
1093{
1094 struct qla_hw_data *ha = vha->hw;
1095 sts_entry_t *pkt = iocb;
1096 srb_t *sp = NULL;
1097 uint16_t index;
1098
1099 index = LSW(pkt->handle);
8d93f550 1100 if (index >= req->num_outstanding_cmds) {
7c3df132
SK
1101 ql_log(ql_log_warn, vha, 0x5031,
1102 "Invalid command index (%x).\n", index);
8f7daead
GM
1103 if (IS_QLA82XX(ha))
1104 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1105 else
1106 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ac280b67
AV
1107 goto done;
1108 }
1109 sp = req->outstanding_cmds[index];
1110 if (!sp) {
7c3df132
SK
1111 ql_log(ql_log_warn, vha, 0x5032,
1112 "Invalid completion handle (%x) -- timed-out.\n", index);
ac280b67
AV
1113 return sp;
1114 }
1115 if (sp->handle != index) {
7c3df132
SK
1116 ql_log(ql_log_warn, vha, 0x5033,
1117 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
ac280b67
AV
1118 return NULL;
1119 }
9a069e19 1120
ac280b67 1121 req->outstanding_cmds[index] = NULL;
9a069e19 1122
ac280b67
AV
1123done:
1124 return sp;
1125}
1126
1127static void
1128qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1129 struct mbx_entry *mbx)
1130{
1131 const char func[] = "MBX-IOCB";
1132 const char *type;
ac280b67
AV
1133 fc_port_t *fcport;
1134 srb_t *sp;
4916392b 1135 struct srb_iocb *lio;
99b0bec7 1136 uint16_t *data;
5ff1d584 1137 uint16_t status;
ac280b67
AV
1138
1139 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1140 if (!sp)
1141 return;
1142
9ba56b95
GM
1143 lio = &sp->u.iocb_cmd;
1144 type = sp->name;
ac280b67 1145 fcport = sp->fcport;
4916392b 1146 data = lio->u.logio.data;
ac280b67 1147
5ff1d584 1148 data[0] = MBS_COMMAND_ERROR;
4916392b 1149 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1150 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1151 if (mbx->entry_status) {
7c3df132 1152 ql_dbg(ql_dbg_async, vha, 0x5043,
cfb0919c 1153 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
d3fa9e7d 1154 "entry-status=%x status=%x state-flag=%x "
cfb0919c
CD
1155 "status-flags=%x.\n", type, sp->handle,
1156 fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d
AV
1157 fcport->d_id.b.al_pa, mbx->entry_status,
1158 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
7c3df132 1159 le16_to_cpu(mbx->status_flags));
d3fa9e7d 1160
cfb0919c 1161 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
7c3df132 1162 (uint8_t *)mbx, sizeof(*mbx));
ac280b67 1163
99b0bec7 1164 goto logio_done;
ac280b67
AV
1165 }
1166
5ff1d584 1167 status = le16_to_cpu(mbx->status);
9ba56b95 1168 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
5ff1d584
AV
1169 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1170 status = 0;
1171 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
7c3df132 1172 ql_dbg(ql_dbg_async, vha, 0x5045,
cfb0919c
CD
1173 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1174 type, sp->handle, fcport->d_id.b.domain,
1175 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1176 le16_to_cpu(mbx->mb1));
ac280b67
AV
1177
1178 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1179 if (sp->type == SRB_LOGIN_CMD) {
99b0bec7
AV
1180 fcport->port_type = FCT_TARGET;
1181 if (le16_to_cpu(mbx->mb1) & BIT_0)
1182 fcport->port_type = FCT_INITIATOR;
6ac52608 1183 else if (le16_to_cpu(mbx->mb1) & BIT_1)
99b0bec7 1184 fcport->flags |= FCF_FCP2_DEVICE;
5ff1d584 1185 }
99b0bec7 1186 goto logio_done;
ac280b67
AV
1187 }
1188
1189 data[0] = le16_to_cpu(mbx->mb0);
1190 switch (data[0]) {
1191 case MBS_PORT_ID_USED:
1192 data[1] = le16_to_cpu(mbx->mb1);
1193 break;
1194 case MBS_LOOP_ID_USED:
1195 break;
1196 default:
1197 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1198 break;
1199 }
1200
7c3df132 1201 ql_log(ql_log_warn, vha, 0x5046,
cfb0919c
CD
1202 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1203 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1204 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1205 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
ac280b67 1206 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
7c3df132 1207 le16_to_cpu(mbx->mb7));
ac280b67 1208
99b0bec7 1209logio_done:
9ba56b95 1210 sp->done(vha, sp, 0);
ac280b67
AV
1211}
1212
9bc4f4fb
HZ
1213static void
1214qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1215 sts_entry_t *pkt, int iocb_type)
1216{
1217 const char func[] = "CT_IOCB";
1218 const char *type;
9bc4f4fb 1219 srb_t *sp;
9bc4f4fb
HZ
1220 struct fc_bsg_job *bsg_job;
1221 uint16_t comp_status;
9ba56b95 1222 int res;
9bc4f4fb
HZ
1223
1224 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1225 if (!sp)
1226 return;
1227
9ba56b95 1228 bsg_job = sp->u.bsg_job;
9bc4f4fb 1229
9ba56b95 1230 type = "ct pass-through";
9bc4f4fb
HZ
1231
1232 comp_status = le16_to_cpu(pkt->comp_status);
1233
1234 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1235 * fc payload to the caller
1236 */
1237 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1238 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1239
1240 if (comp_status != CS_COMPLETE) {
1241 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1242 res = DID_OK << 16;
9bc4f4fb
HZ
1243 bsg_job->reply->reply_payload_rcv_len =
1244 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1245
7c3df132
SK
1246 ql_log(ql_log_warn, vha, 0x5048,
1247 "CT pass-through-%s error "
9bc4f4fb 1248 "comp_status-status=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1249 type, comp_status,
1250 bsg_job->reply->reply_payload_rcv_len);
9bc4f4fb 1251 } else {
7c3df132
SK
1252 ql_log(ql_log_warn, vha, 0x5049,
1253 "CT pass-through-%s error "
1254 "comp_status-status=0x%x.\n", type, comp_status);
9ba56b95 1255 res = DID_ERROR << 16;
9bc4f4fb
HZ
1256 bsg_job->reply->reply_payload_rcv_len = 0;
1257 }
cfb0919c 1258 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
7c3df132 1259 (uint8_t *)pkt, sizeof(*pkt));
9bc4f4fb 1260 } else {
9ba56b95 1261 res = DID_OK << 16;
9bc4f4fb
HZ
1262 bsg_job->reply->reply_payload_rcv_len =
1263 bsg_job->reply_payload.payload_len;
1264 bsg_job->reply_len = 0;
1265 }
1266
9ba56b95 1267 sp->done(vha, sp, res);
9bc4f4fb
HZ
1268}
1269
9a069e19
GM
1270static void
1271qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1272 struct sts_entry_24xx *pkt, int iocb_type)
1273{
1274 const char func[] = "ELS_CT_IOCB";
1275 const char *type;
9a069e19 1276 srb_t *sp;
9a069e19
GM
1277 struct fc_bsg_job *bsg_job;
1278 uint16_t comp_status;
1279 uint32_t fw_status[3];
1280 uint8_t* fw_sts_ptr;
9ba56b95 1281 int res;
9a069e19
GM
1282
1283 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1284 if (!sp)
1285 return;
9ba56b95 1286 bsg_job = sp->u.bsg_job;
9a069e19
GM
1287
1288 type = NULL;
9ba56b95 1289 switch (sp->type) {
9a069e19
GM
1290 case SRB_ELS_CMD_RPT:
1291 case SRB_ELS_CMD_HST:
1292 type = "els";
1293 break;
1294 case SRB_CT_CMD:
1295 type = "ct pass-through";
1296 break;
1297 default:
37fed3ee 1298 ql_dbg(ql_dbg_user, vha, 0x503e,
9ba56b95 1299 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
9a069e19
GM
1300 return;
1301 }
1302
1303 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1304 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1305 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1306
1307 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1308 * fc payload to the caller
1309 */
1310 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1311 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1312
1313 if (comp_status != CS_COMPLETE) {
1314 if (comp_status == CS_DATA_UNDERRUN) {
9ba56b95 1315 res = DID_OK << 16;
9a069e19 1316 bsg_job->reply->reply_payload_rcv_len =
9ba56b95 1317 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
9a069e19 1318
37fed3ee 1319 ql_dbg(ql_dbg_user, vha, 0x503f,
cfb0919c 1320 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1321 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
cfb0919c 1322 type, sp->handle, comp_status, fw_status[1], fw_status[2],
7c3df132
SK
1323 le16_to_cpu(((struct els_sts_entry_24xx *)
1324 pkt)->total_byte_count));
9a069e19
GM
1325 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1326 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1327 }
1328 else {
37fed3ee 1329 ql_dbg(ql_dbg_user, vha, 0x5040,
cfb0919c 1330 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
9a069e19 1331 "error subcode 1=0x%x error subcode 2=0x%x.\n",
cfb0919c 1332 type, sp->handle, comp_status,
7c3df132
SK
1333 le16_to_cpu(((struct els_sts_entry_24xx *)
1334 pkt)->error_subcode_1),
1335 le16_to_cpu(((struct els_sts_entry_24xx *)
1336 pkt)->error_subcode_2));
9ba56b95 1337 res = DID_ERROR << 16;
9a069e19
GM
1338 bsg_job->reply->reply_payload_rcv_len = 0;
1339 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1340 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1341 }
37fed3ee 1342 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
7c3df132 1343 (uint8_t *)pkt, sizeof(*pkt));
9a069e19
GM
1344 }
1345 else {
9ba56b95 1346 res = DID_OK << 16;
9a069e19
GM
1347 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1348 bsg_job->reply_len = 0;
1349 }
1350
9ba56b95 1351 sp->done(vha, sp, res);
9a069e19
GM
1352}
1353
ac280b67
AV
1354static void
1355qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1356 struct logio_entry_24xx *logio)
1357{
1358 const char func[] = "LOGIO-IOCB";
1359 const char *type;
ac280b67
AV
1360 fc_port_t *fcport;
1361 srb_t *sp;
4916392b 1362 struct srb_iocb *lio;
99b0bec7 1363 uint16_t *data;
ac280b67
AV
1364 uint32_t iop[2];
1365
1366 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1367 if (!sp)
1368 return;
1369
9ba56b95
GM
1370 lio = &sp->u.iocb_cmd;
1371 type = sp->name;
ac280b67 1372 fcport = sp->fcport;
4916392b 1373 data = lio->u.logio.data;
ac280b67 1374
5ff1d584 1375 data[0] = MBS_COMMAND_ERROR;
4916392b 1376 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1377 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1378 if (logio->entry_status) {
5e19ed90 1379 ql_log(ql_log_warn, fcport->vha, 0x5034,
cfb0919c 1380 "Async-%s error entry - hdl=%x "
d3fa9e7d 1381 "portid=%02x%02x%02x entry-status=%x.\n",
cfb0919c
CD
1382 type, sp->handle, fcport->d_id.b.domain,
1383 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1384 logio->entry_status);
1385 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
7c3df132 1386 (uint8_t *)logio, sizeof(*logio));
ac280b67 1387
99b0bec7 1388 goto logio_done;
ac280b67
AV
1389 }
1390
1391 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
5e19ed90 1392 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
cfb0919c
CD
1393 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1394 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1395 fcport->d_id.b.area, fcport->d_id.b.al_pa,
7c3df132 1396 le32_to_cpu(logio->io_parameter[0]));
ac280b67
AV
1397
1398 data[0] = MBS_COMMAND_COMPLETE;
9ba56b95 1399 if (sp->type != SRB_LOGIN_CMD)
99b0bec7 1400 goto logio_done;
ac280b67
AV
1401
1402 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1403 if (iop[0] & BIT_4) {
1404 fcport->port_type = FCT_TARGET;
1405 if (iop[0] & BIT_8)
8474f3a0 1406 fcport->flags |= FCF_FCP2_DEVICE;
b0cd579c 1407 } else if (iop[0] & BIT_5)
ac280b67 1408 fcport->port_type = FCT_INITIATOR;
b0cd579c 1409
2d70c103
NB
1410 if (iop[0] & BIT_7)
1411 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1412
ac280b67
AV
1413 if (logio->io_parameter[7] || logio->io_parameter[8])
1414 fcport->supported_classes |= FC_COS_CLASS2;
1415 if (logio->io_parameter[9] || logio->io_parameter[10])
1416 fcport->supported_classes |= FC_COS_CLASS3;
1417
99b0bec7 1418 goto logio_done;
ac280b67
AV
1419 }
1420
1421 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1422 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1423 switch (iop[0]) {
1424 case LSC_SCODE_PORTID_USED:
1425 data[0] = MBS_PORT_ID_USED;
1426 data[1] = LSW(iop[1]);
1427 break;
1428 case LSC_SCODE_NPORT_USED:
1429 data[0] = MBS_LOOP_ID_USED;
1430 break;
ac280b67
AV
1431 default:
1432 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1433 break;
1434 }
1435
5e19ed90 1436 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
cfb0919c
CD
1437 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1438 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
d3fa9e7d 1439 fcport->d_id.b.area, fcport->d_id.b.al_pa,
ac280b67
AV
1440 le16_to_cpu(logio->comp_status),
1441 le32_to_cpu(logio->io_parameter[0]),
7c3df132 1442 le32_to_cpu(logio->io_parameter[1]));
ac280b67 1443
99b0bec7 1444logio_done:
9ba56b95 1445 sp->done(vha, sp, 0);
ac280b67
AV
1446}
1447
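/*
 * Worked example (illustration only) of the PLOGI io_parameter[0] decode
 * above: iop[0] = 0x190 has BIT_4, BIT_7 and BIT_8 set, so the remote port
 * is marked FCT_TARGET with FCF_FCP2_DEVICE and FCF_CONF_COMP_SUPPORTED;
 * had BIT_5 been set instead of BIT_4, it would have been treated as an
 * initiator.
 */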
3822263e
MI
1448static void
1449qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1450 struct tsk_mgmt_entry *tsk)
1451{
1452 const char func[] = "TMF-IOCB";
1453 const char *type;
1454 fc_port_t *fcport;
1455 srb_t *sp;
1456 struct srb_iocb *iocb;
3822263e
MI
1457 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1458 int error = 1;
1459
1460 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1461 if (!sp)
1462 return;
1463
9ba56b95
GM
1464 iocb = &sp->u.iocb_cmd;
1465 type = sp->name;
3822263e
MI
1466 fcport = sp->fcport;
1467
1468 if (sts->entry_status) {
5e19ed90 1469 ql_log(ql_log_warn, fcport->vha, 0x5038,
cfb0919c
CD
1470 "Async-%s error - hdl=%x entry-status(%x).\n",
1471 type, sp->handle, sts->entry_status);
3822263e 1472 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
5e19ed90 1473 ql_log(ql_log_warn, fcport->vha, 0x5039,
cfb0919c
CD
1474 "Async-%s error - hdl=%x completion status(%x).\n",
1475 type, sp->handle, sts->comp_status);
3822263e
MI
1476 } else if (!(le16_to_cpu(sts->scsi_status) &
1477 SS_RESPONSE_INFO_LEN_VALID)) {
5e19ed90 1478 ql_log(ql_log_warn, fcport->vha, 0x503a,
cfb0919c
CD
1479 "Async-%s error - hdl=%x no response info(%x).\n",
1480 type, sp->handle, sts->scsi_status);
3822263e 1481 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
5e19ed90 1482 ql_log(ql_log_warn, fcport->vha, 0x503b,
cfb0919c
CD
1483 "Async-%s error - hdl=%x not enough response(%d).\n",
1484 type, sp->handle, sts->rsp_data_len);
3822263e 1485 } else if (sts->data[3]) {
5e19ed90 1486 ql_log(ql_log_warn, fcport->vha, 0x503c,
cfb0919c
CD
1487 "Async-%s error - hdl=%x response(%x).\n",
1488 type, sp->handle, sts->data[3]);
3822263e
MI
1489 } else {
1490 error = 0;
1491 }
1492
1493 if (error) {
1494 iocb->u.tmf.data = error;
7c3df132
SK
1495 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1496 (uint8_t *)sts, sizeof(*sts));
3822263e
MI
1497 }
1498
9ba56b95 1499 sp->done(vha, sp, 0);
3822263e
MI
1500}
1501
1da177e4
LT
1502/**
1503 * qla2x00_process_response_queue() - Process response queue entries.
 1504 * @rsp: response queue to process
1505 */
1506void
73208dfd 1507qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 1508{
73208dfd
AC
1509 struct scsi_qla_host *vha;
1510 struct qla_hw_data *ha = rsp->hw;
3d71644c 1511 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1512 sts_entry_t *pkt;
1513 uint16_t handle_cnt;
1514 uint16_t cnt;
73208dfd 1515
2afa19a9 1516 vha = pci_get_drvdata(ha->pdev);
1da177e4 1517
e315cd28 1518 if (!vha->flags.online)
1da177e4
LT
1519 return;
1520
e315cd28
AC
1521 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1522 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 1523
e315cd28
AC
1524 rsp->ring_index++;
1525 if (rsp->ring_index == rsp->length) {
1526 rsp->ring_index = 0;
1527 rsp->ring_ptr = rsp->ring;
1da177e4 1528 } else {
e315cd28 1529 rsp->ring_ptr++;
1da177e4
LT
1530 }
1531
1532 if (pkt->entry_status != 0) {
73208dfd 1533 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
1534 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1535 wmb();
1536 continue;
1537 }
1538
1539 switch (pkt->entry_type) {
1540 case STATUS_TYPE:
73208dfd 1541 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
1542 break;
1543 case STATUS_TYPE_21:
1544 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1545 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1546 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1547 ((sts21_entry_t *)pkt)->handle[cnt]);
1548 }
1549 break;
1550 case STATUS_TYPE_22:
1551 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1552 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1553 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1554 ((sts22_entry_t *)pkt)->handle[cnt]);
1555 }
1556 break;
1557 case STATUS_CONT_TYPE:
2afa19a9 1558 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1da177e4 1559 break;
ac280b67
AV
1560 case MBX_IOCB_TYPE:
1561 qla2x00_mbx_iocb_entry(vha, rsp->req,
1562 (struct mbx_entry *)pkt);
3822263e 1563 break;
9bc4f4fb
HZ
1564 case CT_IOCB_TYPE:
1565 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1566 break;
1da177e4
LT
1567 default:
1568 /* Type Not Supported. */
7c3df132
SK
1569 ql_log(ql_log_warn, vha, 0x504a,
1570 "Received unknown response pkt type %x "
1da177e4 1571 "entry status=%x.\n",
7c3df132 1572 pkt->entry_type, pkt->entry_status);
1da177e4
LT
1573 break;
1574 }
1575 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1576 wmb();
1577 }
1578
1579 /* Adjust ring index */
e315cd28 1580 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
1581}
1582
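/*
 * Illustrative sketch only: the response ring walked above is a plain
 * circular buffer.  The advance-and-wrap step, read in isolation:
 */
static inline void example_advance_rsp_ring(struct rsp_que *rsp)
{
        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
                /* Wrapped past the last entry -- go back to the start. */
                rsp->ring_index = 0;
                rsp->ring_ptr = rsp->ring;
        } else {
                rsp->ring_ptr++;
        }
}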
4733fcb1 1583static inline void
5544213b 1584qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
9ba56b95 1585 uint32_t sense_len, struct rsp_que *rsp, int res)
4733fcb1 1586{
7c3df132 1587 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95
GM
1588 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1589 uint32_t track_sense_len;
4733fcb1
AV
1590
1591 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1592 sense_len = SCSI_SENSE_BUFFERSIZE;
1593
9ba56b95
GM
1594 SET_CMD_SENSE_LEN(sp, sense_len);
1595 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1596 track_sense_len = sense_len;
1597
1598 if (sense_len > par_sense_len)
5544213b 1599 sense_len = par_sense_len;
4733fcb1
AV
1600
1601 memcpy(cp->sense_buffer, sense_data, sense_len);
1602
9ba56b95
GM
1603 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1604 track_sense_len -= sense_len;
1605 SET_CMD_SENSE_LEN(sp, track_sense_len);
1606
1607 if (track_sense_len != 0) {
2afa19a9 1608 rsp->status_srb = sp;
9ba56b95
GM
1609 cp->result = res;
1610 }
4733fcb1 1611
cfb0919c
CD
1612 if (sense_len) {
1613 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1614 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1615 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1616 cp);
7c3df132
SK
1617 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1618 cp->sense_buffer, sense_len);
cfb0919c 1619 }
4733fcb1
AV
1620}
1621
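/*
 * Worked example (illustration only) of the sense-continuation logic
 * above: if the firmware reports 96 bytes of sense data but the status
 * IOCB itself only carries par_sense_len = 28 bytes, the first 28 bytes
 * are copied here, the tracked length drops to 68, and rsp->status_srb is
 * set so that qla2x00_status_cont_entry() appends the remaining bytes from
 * the continuation entries that follow.
 */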
bad75002
AE
1622struct scsi_dif_tuple {
1623 __be16 guard; /* Checksum */
d6a03581 1624 __be16 app_tag; /* APPL identifier */
bad75002
AE
1625 __be32 ref_tag; /* Target LBA or indirect LBA */
1626};
1627
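/*
 * Illustrative sketch only: a DIF tuple occupies 8 bytes on the wire in
 * big-endian order (guard, app_tag, ref_tag).  A field-by-field compare of
 * an expected tuple against the actual one returned by the HBA could look
 * like the hypothetical helper below; qla2x00_handle_dif_error() performs
 * the same comparisons after extracting the values from the status IOCB
 * payload.
 */
static inline int example_dif_tuple_mismatch(const struct scsi_dif_tuple *exp,
    const struct scsi_dif_tuple *act)
{
        return exp->guard != act->guard ||
               exp->app_tag != act->app_tag ||
               exp->ref_tag != act->ref_tag;
}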
1628/*
1629 * Checks the guard or meta-data for the type of error
1630 * detected by the HBA. In case of errors, we set the
1631 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 1632 * to indicate to the kernel that the HBA detected an error.
1633 */
8cb2049c 1634static inline int
bad75002
AE
1635qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1636{
7c3df132 1637 struct scsi_qla_host *vha = sp->fcport->vha;
9ba56b95 1638 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
8cb2049c
AE
1639 uint8_t *ap = &sts24->data[12];
1640 uint8_t *ep = &sts24->data[20];
bad75002
AE
1641 uint32_t e_ref_tag, a_ref_tag;
1642 uint16_t e_app_tag, a_app_tag;
1643 uint16_t e_guard, a_guard;
1644
8cb2049c
AE
1645 /*
 1646 * The swab32 of the "data" field at the beginning of qla2x00_status_entry()
 1647 * makes the guard field appear at offset 2.
1648 */
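	/*
	 * As read below, data[12..19] holds what the code treats as the
	 * actual DIF tuple and data[20..27] the expected one; after the
	 * 32-bit swap the app tag sits at offset 0, the guard at offset 2
	 * and the ref tag at offset 4 of each 8-byte tuple.
	 */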
1649 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1650 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1651 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1652 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1653 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1654 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
bad75002 1655
7c3df132
SK
1656 ql_dbg(ql_dbg_io, vha, 0x3023,
1657 "iocb(s) %p Returned STATUS.\n", sts24);
bad75002 1658
7c3df132
SK
1659 ql_dbg(ql_dbg_io, vha, 0x3024,
1660 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
bad75002 1661 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
7c3df132 1662 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
bad75002 1663 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
7c3df132 1664 a_app_tag, e_app_tag, a_guard, e_guard);
bad75002 1665
8cb2049c
AE
1666 /*
1667 * Ignore sector if:
1668 * For type 3: ref & app tag is all 'f's
1669 * For type 0,1,2: app tag is all 'f's
1670 */
1671 if ((a_app_tag == 0xffff) &&
1672 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1673 (a_ref_tag == 0xffffffff))) {
1674 uint32_t blocks_done, resid;
1675 sector_t lba_s = scsi_get_lba(cmd);
1676
1677 /* 2TB boundary case covered automatically with this */
1678 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1679
1680 resid = scsi_bufflen(cmd) - (blocks_done *
1681 cmd->device->sector_size);
1682
1683 scsi_set_resid(cmd, resid);
1684 cmd->result = DID_OK << 16;
1685
1686 /* Update protection tag */
1687 if (scsi_prot_sg_count(cmd)) {
1688 uint32_t i, j = 0, k = 0, num_ent;
1689 struct scatterlist *sg;
1690 struct sd_dif_tuple *spt;
1691
1692 /* Patch the corresponding protection tags */
1693 scsi_for_each_prot_sg(cmd, sg,
1694 scsi_prot_sg_count(cmd), i) {
1695 num_ent = sg_dma_len(sg) / 8;
1696 if (k + num_ent < blocks_done) {
1697 k += num_ent;
1698 continue;
1699 }
1700 j = blocks_done - k - 1;
1701 k = blocks_done;
1702 break;
1703 }
1704
1705 if (k != blocks_done) {
cfb0919c 1706 ql_log(ql_log_warn, vha, 0x302f,
8ec9c7fb
RD
1707 "unexpected tag values tag:lba=%x:%llx)\n",
1708 e_ref_tag, (unsigned long long)lba_s);
8cb2049c
AE
1709 return 1;
1710 }
1711
1712 spt = page_address(sg_page(sg)) + sg->offset;
1713 spt += j;
1714
1715 spt->app_tag = 0xffff;
1716 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1717 spt->ref_tag = 0xffffffff;
1718 }
1719
1720 return 0;
1721 }
1722
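	/*
	 * The additional sense codes used below are the standard T10 values:
	 * ASC 0x10 with ASCQ 0x1 = guard check failed, 0x2 = application
	 * tag check failed, 0x3 = reference tag check failed.
	 */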
bad75002
AE
1723 /* check guard */
1724 if (e_guard != a_guard) {
1725 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1726 0x10, 0x1);
1727 set_driver_byte(cmd, DRIVER_SENSE);
1728 set_host_byte(cmd, DID_ABORT);
1729 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1730 return 1;
bad75002
AE
1731 }
1732
e02587d7
AE
1733 /* check ref tag */
1734 if (e_ref_tag != a_ref_tag) {
bad75002 1735 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1736 0x10, 0x3);
bad75002
AE
1737 set_driver_byte(cmd, DRIVER_SENSE);
1738 set_host_byte(cmd, DID_ABORT);
1739 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1740 return 1;
bad75002
AE
1741 }
1742
e02587d7
AE
1743 /* check appl tag */
1744 if (e_app_tag != a_app_tag) {
bad75002 1745 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
e02587d7 1746 0x10, 0x2);
bad75002
AE
1747 set_driver_byte(cmd, DRIVER_SENSE);
1748 set_host_byte(cmd, DID_ABORT);
1749 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
8cb2049c 1750 return 1;
bad75002 1751 }
e02587d7 1752
8cb2049c 1753 return 1;
bad75002
AE
1754}
1755
a9b6f722
SK
1756static void
1757qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1758 struct req_que *req, uint32_t index)
1759{
1760 struct qla_hw_data *ha = vha->hw;
1761 srb_t *sp;
1762 uint16_t comp_status;
1763 uint16_t scsi_status;
1764 uint16_t thread_id;
1765 uint32_t rval = EXT_STATUS_OK;
1766 struct fc_bsg_job *bsg_job = NULL;
1767 sts_entry_t *sts;
1768 struct sts_entry_24xx *sts24;
1769 sts = (sts_entry_t *) pkt;
1770 sts24 = (struct sts_entry_24xx *) pkt;
1771
1772 /* Validate handle. */
8d93f550 1773 if (index >= req->num_outstanding_cmds) {
a9b6f722
SK
1774 ql_log(ql_log_warn, vha, 0x70af,
1775 "Invalid SCSI completion handle 0x%x.\n", index);
1776 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1777 return;
1778 }
1779
1780 sp = req->outstanding_cmds[index];
1781 if (sp) {
1782 /* Free outstanding command slot. */
1783 req->outstanding_cmds[index] = NULL;
1784 bsg_job = sp->u.bsg_job;
1785 } else {
1786 ql_log(ql_log_warn, vha, 0x70b0,
1787 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1788 req->id, index);
1789
1790 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1791 return;
1792 }
1793
1794 if (IS_FWI2_CAPABLE(ha)) {
1795 comp_status = le16_to_cpu(sts24->comp_status);
1796 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1797 } else {
1798 comp_status = le16_to_cpu(sts->comp_status);
1799 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1800 }
1801
1802 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1803 switch (comp_status) {
1804 case CS_COMPLETE:
1805 if (scsi_status == 0) {
1806 bsg_job->reply->reply_payload_rcv_len =
1807 bsg_job->reply_payload.payload_len;
1808 rval = EXT_STATUS_OK;
1809 }
1810 goto done;
1811
1812 case CS_DATA_OVERRUN:
1813 ql_dbg(ql_dbg_user, vha, 0x70b1,
1814 "Command completed with date overrun thread_id=%d\n",
1815 thread_id);
1816 rval = EXT_STATUS_DATA_OVERRUN;
1817 break;
1818
1819 case CS_DATA_UNDERRUN:
1820 ql_dbg(ql_dbg_user, vha, 0x70b2,
1821 "Command completed with date underrun thread_id=%d\n",
1822 thread_id);
1823 rval = EXT_STATUS_DATA_UNDERRUN;
1824 break;
1825 case CS_BIDIR_RD_OVERRUN:
1826 ql_dbg(ql_dbg_user, vha, 0x70b3,
1827 "Command completed with read data overrun thread_id=%d\n",
1828 thread_id);
1829 rval = EXT_STATUS_DATA_OVERRUN;
1830 break;
1831
1832 case CS_BIDIR_RD_WR_OVERRUN:
1833 ql_dbg(ql_dbg_user, vha, 0x70b4,
1834 "Command completed with read and write data overrun "
1835 "thread_id=%d\n", thread_id);
1836 rval = EXT_STATUS_DATA_OVERRUN;
1837 break;
1838
1839 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1840 ql_dbg(ql_dbg_user, vha, 0x70b5,
1841 "Command completed with read data over and write data "
1842 "underrun thread_id=%d\n", thread_id);
1843 rval = EXT_STATUS_DATA_OVERRUN;
1844 break;
1845
1846 case CS_BIDIR_RD_UNDERRUN:
1847 ql_dbg(ql_dbg_user, vha, 0x70b6,
1848 "Command completed with read data data underrun "
1849 "thread_id=%d\n", thread_id);
1850 rval = EXT_STATUS_DATA_UNDERRUN;
1851 break;
1852
1853 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1854 ql_dbg(ql_dbg_user, vha, 0x70b7,
1855 "Command completed with read data under and write data "
1856 "overrun thread_id=%d\n", thread_id);
1857 rval = EXT_STATUS_DATA_UNDERRUN;
1858 break;
1859
1860 case CS_BIDIR_RD_WR_UNDERRUN:
1861 ql_dbg(ql_dbg_user, vha, 0x70b8,
1862 "Command completed with read and write data underrun "
1863 "thread_id=%d\n", thread_id);
1864 rval = EXT_STATUS_DATA_UNDERRUN;
1865 break;
1866
1867 case CS_BIDIR_DMA:
1868 ql_dbg(ql_dbg_user, vha, 0x70b9,
1869 "Command completed with data DMA error thread_id=%d\n",
1870 thread_id);
1871 rval = EXT_STATUS_DMA_ERR;
1872 break;
1873
1874 case CS_TIMEOUT:
1875 ql_dbg(ql_dbg_user, vha, 0x70ba,
1876 "Command completed with timeout thread_id=%d\n",
1877 thread_id);
1878 rval = EXT_STATUS_TIMEOUT;
1879 break;
1880 default:
1881 ql_dbg(ql_dbg_user, vha, 0x70bb,
1882 "Command completed with completion status=0x%x "
1883 "thread_id=%d\n", comp_status, thread_id);
1884 rval = EXT_STATUS_ERR;
1885 break;
1886 }
1887 bsg_job->reply->reply_payload_rcv_len = 0;
1888
1889done:
1890 /* Return the vendor specific reply to API */
1891 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1892 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
 1893 /* Always return DID_OK; bsg will send the vendor specific response
 1894 * in this case only. */
 1895 sp->done(vha, sp, (DID_OK << 16));
1896
1897}
1898
1da177e4
LT
1899/**
1900 * qla2x00_status_entry() - Process a Status IOCB entry.
1901 * @ha: SCSI driver HA context
1902 * @pkt: Entry pointer
1903 */
1904static void
73208dfd 1905qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1906{
1da177e4 1907 srb_t *sp;
1da177e4
LT
1908 fc_port_t *fcport;
1909 struct scsi_cmnd *cp;
9a853f71
AV
1910 sts_entry_t *sts;
1911 struct sts_entry_24xx *sts24;
1da177e4
LT
1912 uint16_t comp_status;
1913 uint16_t scsi_status;
b7d2280c 1914 uint16_t ox_id;
1da177e4
LT
1915 uint8_t lscsi_status;
1916 int32_t resid;
5544213b
AV
1917 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1918 fw_resid_len;
9a853f71 1919 uint8_t *rsp_info, *sense_data;
e315cd28 1920 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1921 uint32_t handle;
1922 uint16_t que;
1923 struct req_que *req;
b7d2280c 1924 int logit = 1;
9ba56b95 1925 int res = 0;
a9b6f722 1926 uint16_t state_flags = 0;
9a853f71
AV
1927
1928 sts = (sts_entry_t *) pkt;
1929 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1930 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1931 comp_status = le16_to_cpu(sts24->comp_status);
1932 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
a9b6f722 1933 state_flags = le16_to_cpu(sts24->state_flags);
9a853f71
AV
1934 } else {
1935 comp_status = le16_to_cpu(sts->comp_status);
1936 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1937 }
2afa19a9
AC
1938 handle = (uint32_t) LSW(sts->handle);
1939 que = MSW(sts->handle);
1940 req = ha->req_q_map[que];
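	/*
	 * The completion handle packs the outstanding-command index in its
	 * low word and the originating request-queue number in its high
	 * word, which is why LSW/MSW are split out above.
	 */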
a9083016 1941
1da177e4 1942 /* Validate handle. */
8d93f550 1943 if (handle < req->num_outstanding_cmds)
2afa19a9 1944 sp = req->outstanding_cmds[handle];
8d93f550 1945 else
1da177e4
LT
1946 sp = NULL;
1947
1948 if (sp == NULL) {
cfb0919c 1949 ql_dbg(ql_dbg_io, vha, 0x3017,
7c3df132 1950 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1951
8f7daead
GM
1952 if (IS_QLA82XX(ha))
1953 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1954 else
1955 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1956 qla2xxx_wake_dpc(vha);
1da177e4
LT
1957 return;
1958 }
a9b6f722
SK
1959
1960 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1961 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1962 return;
1963 }
1964
1965 /* Fast path completion. */
1966 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3c290d0b 1967 qla2x00_do_host_ramp_up(vha);
a9b6f722
SK
1968 qla2x00_process_completed_request(vha, req, handle);
1969
1970 return;
1971 }
1972
1973 req->outstanding_cmds[handle] = NULL;
9ba56b95 1974 cp = GET_CMD_SP(sp);
1da177e4 1975 if (cp == NULL) {
cfb0919c 1976 ql_dbg(ql_dbg_io, vha, 0x3018,
7c3df132
SK
1977 "Command already returned (0x%x/%p).\n",
1978 sts->handle, sp);
1da177e4
LT
1979
1980 return;
1981 }
1982
8ae6d9c7 1983 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1984
bdf79621 1985 fcport = sp->fcport;
1da177e4 1986
b7d2280c 1987 ox_id = 0;
5544213b
AV
1988 sense_len = par_sense_len = rsp_info_len = resid_len =
1989 fw_resid_len = 0;
e428924c 1990 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
1991 if (scsi_status & SS_SENSE_LEN_VALID)
1992 sense_len = le32_to_cpu(sts24->sense_len);
1993 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1994 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1995 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1996 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1997 if (comp_status == CS_DATA_UNDERRUN)
1998 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1999 rsp_info = sts24->data;
2000 sense_data = sts24->data;
2001 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 2002 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 2003 par_sense_len = sizeof(sts24->data);
9a853f71 2004 } else {
0f00a206
LC
2005 if (scsi_status & SS_SENSE_LEN_VALID)
2006 sense_len = le16_to_cpu(sts->req_sense_length);
2007 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2008 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
2009 resid_len = le32_to_cpu(sts->residual_length);
2010 rsp_info = sts->rsp_info;
2011 sense_data = sts->req_sense_data;
5544213b 2012 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
2013 }
2014
1da177e4
LT
2015 /* Check for any FCP transport errors. */
2016 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 2017 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 2018 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 2019 sense_data += rsp_info_len;
5544213b
AV
2020 par_sense_len -= rsp_info_len;
2021 }
9a853f71 2022 if (rsp_info_len > 3 && rsp_info[3]) {
5e19ed90 2023 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
7c3df132
SK
2024 "FCP I/O protocol failure (0x%x/0x%x).\n",
2025 rsp_info_len, rsp_info[3]);
1da177e4 2026
9ba56b95 2027 res = DID_BUS_BUSY << 16;
b7d2280c 2028 goto out;
1da177e4
LT
2029 }
2030 }
2031
3e8ce320
AV
2032 /* Check for overrun. */
2033 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2034 scsi_status & SS_RESIDUAL_OVER)
2035 comp_status = CS_DATA_OVERRUN;
2036
1da177e4
LT
2037 /*
2038 * Based on Host and scsi status generate status code for Linux
2039 */
2040 switch (comp_status) {
2041 case CS_COMPLETE:
df7baa50 2042 case CS_QUEUE_FULL:
1da177e4 2043 if (scsi_status == 0) {
9ba56b95 2044 res = DID_OK << 16;
1da177e4
LT
2045 break;
2046 }
2047 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 2048 resid = resid_len;
385d70b4 2049 scsi_set_resid(cp, resid);
0da69df1
AV
2050
2051 if (!lscsi_status &&
385d70b4 2052 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 2053 cp->underflow)) {
5e19ed90 2054 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
7c3df132 2055 "Mid-layer underflow "
b7d2280c 2056 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2057 resid, scsi_bufflen(cp));
0da69df1 2058
9ba56b95 2059 res = DID_ERROR << 16;
0da69df1
AV
2060 break;
2061 }
1da177e4 2062 }
9ba56b95 2063 res = DID_OK << 16 | lscsi_status;
1da177e4 2064
df7baa50 2065 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2066 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
7c3df132 2067 "QUEUE FULL detected.\n");
df7baa50
AV
2068 break;
2069 }
b7d2280c 2070 logit = 0;
1da177e4
LT
2071 if (lscsi_status != SS_CHECK_CONDITION)
2072 break;
2073
b80ca4f7 2074 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2075 if (!(scsi_status & SS_SENSE_LEN_VALID))
2076 break;
2077
5544213b 2078 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
9ba56b95 2079 rsp, res);
1da177e4
LT
2080 break;
2081
2082 case CS_DATA_UNDERRUN:
ed17c71b 2083 /* Use F/W calculated residual length. */
0f00a206
LC
2084 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2085 scsi_set_resid(cp, resid);
2086 if (scsi_status & SS_RESIDUAL_UNDER) {
2087 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
5e19ed90 2088 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
7c3df132
SK
2089 "Dropped frame(s) detected "
2090 "(0x%x of 0x%x bytes).\n",
2091 resid, scsi_bufflen(cp));
0f00a206 2092
9ba56b95 2093 res = DID_ERROR << 16 | lscsi_status;
4e85e3d9 2094 goto check_scsi_status;
6acf8190 2095 }
ed17c71b 2096
0f00a206
LC
2097 if (!lscsi_status &&
2098 ((unsigned)(scsi_bufflen(cp) - resid) <
2099 cp->underflow)) {
5e19ed90 2100 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
7c3df132 2101 "Mid-layer underflow "
b7d2280c 2102 "detected (0x%x of 0x%x bytes).\n",
7c3df132 2103 resid, scsi_bufflen(cp));
e038a1be 2104
9ba56b95 2105 res = DID_ERROR << 16;
0f00a206
LC
2106 break;
2107 }
4aee5766
GM
2108 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2109 lscsi_status != SAM_STAT_BUSY) {
2110 /*
 2111 * A SCSI status of TASK SET FULL or BUSY is treated as a task
 2112 * that did not complete.
2113 */
2114
5e19ed90 2115 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
7c3df132 2116 "Dropped frame(s) detected (0x%x "
4aee5766
GM
2117 "of 0x%x bytes).\n", resid,
2118 scsi_bufflen(cp));
0f00a206 2119
9ba56b95 2120 res = DID_ERROR << 16 | lscsi_status;
0374f55e 2121 goto check_scsi_status;
4aee5766
GM
2122 } else {
2123 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2124 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2125 scsi_status, lscsi_status);
1da177e4
LT
2126 }
2127
9ba56b95 2128 res = DID_OK << 16 | lscsi_status;
b7d2280c 2129 logit = 0;
0f00a206 2130
0374f55e 2131check_scsi_status:
1da177e4 2132 /*
fa2a1ce5 2133 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
2134 * Status.
2135 */
2136 if (lscsi_status != 0) {
ffec28a3 2137 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
5e19ed90 2138 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
7c3df132 2139 "QUEUE FULL detected.\n");
b7d2280c 2140 logit = 1;
ffec28a3
AV
2141 break;
2142 }
1da177e4
LT
2143 if (lscsi_status != SS_CHECK_CONDITION)
2144 break;
2145
b80ca4f7 2146 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
2147 if (!(scsi_status & SS_SENSE_LEN_VALID))
2148 break;
2149
5544213b 2150 qla2x00_handle_sense(sp, sense_data, par_sense_len,
9ba56b95 2151 sense_len, rsp, res);
1da177e4
LT
2152 }
2153 break;
2154
1da177e4
LT
2155 case CS_PORT_LOGGED_OUT:
2156 case CS_PORT_CONFIG_CHG:
2157 case CS_PORT_BUSY:
2158 case CS_INCOMPLETE:
2159 case CS_PORT_UNAVAILABLE:
b7d2280c 2160 case CS_TIMEOUT:
ff454b01
CD
2161 case CS_RESET:
2162
056a4483
MC
2163 /*
2164 * We are going to have the fc class block the rport
2165 * while we try to recover so instruct the mid layer
2166 * to requeue until the class decides how to handle this.
2167 */
9ba56b95 2168 res = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
2169
2170 if (comp_status == CS_TIMEOUT) {
2171 if (IS_FWI2_CAPABLE(ha))
2172 break;
2173 else if ((le16_to_cpu(sts->status_flags) &
2174 SF_LOGOUT_SENT) == 0)
2175 break;
2176 }
2177
5e19ed90 2178 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
7c3df132
SK
2179 "Port down status: port-state=0x%x.\n",
2180 atomic_read(&fcport->state));
b7d2280c 2181
a7a28504 2182 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 2183 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
2184 break;
2185
1da177e4 2186 case CS_ABORTED:
9ba56b95 2187 res = DID_RESET << 16;
1da177e4 2188 break;
bad75002
AE
2189
2190 case CS_DIF_ERROR:
8cb2049c 2191 logit = qla2x00_handle_dif_error(sp, sts24);
fb6e4668 2192 res = cp->result;
bad75002 2193 break;
9e522cd8
AE
2194
2195 case CS_TRANSPORT:
2196 res = DID_ERROR << 16;
2197
2198 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2199 break;
2200
2201 if (state_flags & BIT_4)
2202 scmd_printk(KERN_WARNING, cp,
2203 "Unsupported device '%s' found.\n",
2204 cp->device->vendor);
2205 break;
2206
1da177e4 2207 default:
9ba56b95 2208 res = DID_ERROR << 16;
1da177e4
LT
2209 break;
2210 }
2211
b7d2280c
AV
2212out:
2213 if (logit)
5e19ed90 2214 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
7c3df132 2215 "FCP command status: 0x%x-0x%x (0x%x) "
cfb0919c
CD
2216 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2217 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
7c3df132 2218 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
9ba56b95 2219 comp_status, scsi_status, res, vha->host_no,
cfb0919c
CD
2220 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2221 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2222 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2223 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2224 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
7c3df132 2225 resid_len, fw_resid_len);
b7d2280c 2226
3c290d0b
CD
2227 if (!res)
2228 qla2x00_do_host_ramp_up(vha);
2229
2afa19a9 2230 if (rsp->status_srb == NULL)
9ba56b95 2231 sp->done(ha, sp, res);
1da177e4
LT
2232}
2233
2234/**
2235 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2236 * @ha: SCSI driver HA context
2237 * @pkt: Entry pointer
2238 *
2239 * Extended sense data.
2240 */
2241static void
2afa19a9 2242qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4 2243{
9ba56b95 2244 uint8_t sense_sz = 0;
2afa19a9 2245 struct qla_hw_data *ha = rsp->hw;
7c3df132 2246 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
9ba56b95 2247 srb_t *sp = rsp->status_srb;
1da177e4 2248 struct scsi_cmnd *cp;
9ba56b95
GM
2249 uint32_t sense_len;
2250 uint8_t *sense_ptr;
1da177e4 2251
9ba56b95
GM
2252 if (!sp || !GET_CMD_SENSE_LEN(sp))
2253 return;
1da177e4 2254
9ba56b95
GM
2255 sense_len = GET_CMD_SENSE_LEN(sp);
2256 sense_ptr = GET_CMD_SENSE_PTR(sp);
1da177e4 2257
9ba56b95
GM
2258 cp = GET_CMD_SP(sp);
2259 if (cp == NULL) {
2260 ql_log(ql_log_warn, vha, 0x3025,
2261 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1da177e4 2262
9ba56b95
GM
2263 rsp->status_srb = NULL;
2264 return;
1da177e4 2265 }
1da177e4 2266
9ba56b95
GM
2267 if (sense_len > sizeof(pkt->data))
2268 sense_sz = sizeof(pkt->data);
2269 else
2270 sense_sz = sense_len;
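	/*
	 * A continuation IOCB can carry at most sizeof(pkt->data) sense
	 * bytes; any remainder arrives in further Status Continuation
	 * entries until the length recorded by qla2x00_handle_sense()
	 * is exhausted.
	 */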
c4631191 2271
9ba56b95
GM
2272 /* Move sense data. */
2273 if (IS_FWI2_CAPABLE(ha))
2274 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2275 memcpy(sense_ptr, pkt->data, sense_sz);
2276 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2277 sense_ptr, sense_sz);
c4631191 2278
9ba56b95
GM
2279 sense_len -= sense_sz;
2280 sense_ptr += sense_sz;
c4631191 2281
9ba56b95
GM
2282 SET_CMD_SENSE_PTR(sp, sense_ptr);
2283 SET_CMD_SENSE_LEN(sp, sense_len);
2284
2285 /* Place command on done queue. */
2286 if (sense_len == 0) {
2287 rsp->status_srb = NULL;
2288 sp->done(ha, sp, cp->result);
c4631191 2289 }
c4631191
GM
2290}
2291
1da177e4
LT
2292/**
2293 * qla2x00_error_entry() - Process an error entry.
2294 * @ha: SCSI driver HA context
2295 * @pkt: Entry pointer
2296 */
2297static void
73208dfd 2298qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
2299{
2300 srb_t *sp;
e315cd28 2301 struct qla_hw_data *ha = vha->hw;
c4631191 2302 const char func[] = "ERROR-IOCB";
2afa19a9 2303 uint16_t que = MSW(pkt->handle);
a6fe35c0 2304 struct req_que *req = NULL;
9ba56b95 2305 int res = DID_ERROR << 16;
7c3df132 2306
9ba56b95
GM
2307 ql_dbg(ql_dbg_async, vha, 0x502a,
2308 "type of error status in response: 0x%x\n", pkt->entry_status);
2309
a6fe35c0
AE
2310 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2311 goto fatal;
2312
2313 req = ha->req_q_map[que];
2314
9ba56b95
GM
2315 if (pkt->entry_status & RF_BUSY)
2316 res = DID_BUS_BUSY << 16;
1da177e4 2317
c4631191 2318 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
a6fe35c0 2319 if (sp) {
9ba56b95 2320 sp->done(ha, sp, res);
a6fe35c0 2321 return;
1da177e4 2322 }
a6fe35c0
AE
2323fatal:
2324 ql_log(ql_log_warn, vha, 0x5030,
2325 "Error entry - invalid handle/queue.\n");
2326
2327 if (IS_QLA82XX(ha))
2328 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2329 else
2330 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2331 qla2xxx_wake_dpc(vha);
1da177e4
LT
2332}
2333
9a853f71
AV
2334/**
2335 * qla24xx_mbx_completion() - Process mailbox command completions.
2336 * @ha: SCSI driver HA context
2337 * @mb0: Mailbox0 register
2338 */
2339static void
e315cd28 2340qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
2341{
2342 uint16_t cnt;
4fa94f83 2343 uint32_t mboxes;
9a853f71 2344 uint16_t __iomem *wptr;
e315cd28 2345 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
2346 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2347
4fa94f83
AV
2348 /* Read all mbox registers? */
2349 mboxes = (1 << ha->mbx_count) - 1;
2350 if (!ha->mcp)
a720101d 2351 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
4fa94f83
AV
2352 else
2353 mboxes = ha->mcp->in_mb;
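	/*
	 * mcp->in_mb is a bitmask of the mailbox registers the caller
	 * expects back; only those registers are read in the loop below.
	 */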
2354
9a853f71
AV
2355 /* Load return mailbox registers. */
2356 ha->flags.mbox_int = 1;
2357 ha->mailbox_out[0] = mb0;
4fa94f83 2358 mboxes >>= 1;
9a853f71
AV
2359 wptr = (uint16_t __iomem *)&reg->mailbox1;
2360
2361 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
4fa94f83
AV
2362 if (mboxes & BIT_0)
2363 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2364
2365 mboxes >>= 1;
9a853f71
AV
2366 wptr++;
2367 }
9a853f71
AV
2368}
2369
2370/**
2371 * qla24xx_process_response_queue() - Process response queue entries.
2372 * @ha: SCSI driver HA context
2373 */
2afa19a9
AC
2374void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2375 struct rsp_que *rsp)
9a853f71 2376{
9a853f71 2377 struct sts_entry_24xx *pkt;
a9083016 2378 struct qla_hw_data *ha = vha->hw;
9a853f71 2379
e315cd28 2380 if (!vha->flags.online)
9a853f71
AV
2381 return;
2382
e315cd28
AC
2383 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2384 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2385
e315cd28
AC
2386 rsp->ring_index++;
2387 if (rsp->ring_index == rsp->length) {
2388 rsp->ring_index = 0;
2389 rsp->ring_ptr = rsp->ring;
9a853f71 2390 } else {
e315cd28 2391 rsp->ring_ptr++;
9a853f71
AV
2392 }
2393
2394 if (pkt->entry_status != 0) {
73208dfd 2395 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2d70c103
NB
2396
2397 (void)qlt_24xx_process_response_error(vha, pkt);
2398
9a853f71
AV
2399 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2400 wmb();
2401 continue;
2402 }
2403
2404 switch (pkt->entry_type) {
2405 case STATUS_TYPE:
73208dfd 2406 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2407 break;
2408 case STATUS_CONT_TYPE:
2afa19a9 2409 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2410 break;
2c3dfe3f 2411 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2412 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2413 (struct vp_rpt_id_entry_24xx *)pkt);
2414 break;
ac280b67
AV
2415 case LOGINOUT_PORT_IOCB_TYPE:
2416 qla24xx_logio_entry(vha, rsp->req,
2417 (struct logio_entry_24xx *)pkt);
2418 break;
3822263e
MI
2419 case TSK_MGMT_IOCB_TYPE:
2420 qla24xx_tm_iocb_entry(vha, rsp->req,
2421 (struct tsk_mgmt_entry *)pkt);
2422 break;
9a069e19
GM
2423 case CT_IOCB_TYPE:
2424 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
9a069e19
GM
2425 break;
2426 case ELS_IOCB_TYPE:
2427 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2428 break;
2d70c103
NB
2429 case ABTS_RECV_24XX:
2430 /* ensure that the ATIO queue is empty */
2431 qlt_24xx_process_atio_queue(vha);
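			/* Intentional fall through to the target-mode response handling below. */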
2432 case ABTS_RESP_24XX:
2433 case CTIO_TYPE7:
2434 case NOTIFY_ACK_TYPE:
2435 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2436 break;
54883291
SK
2437 case MARKER_TYPE:
 2438 /* Do nothing in this case; this check only prevents the packet
 2439 * from falling into the default case.
2440 */
2441 break;
9a853f71
AV
2442 default:
2443 /* Type Not Supported. */
7c3df132
SK
2444 ql_dbg(ql_dbg_async, vha, 0x5042,
2445 "Received unknown response pkt type %x "
9a853f71 2446 "entry status=%x.\n",
7c3df132 2447 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2448 break;
2449 }
2450 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2451 wmb();
2452 }
2453
2454 /* Adjust ring index */
a9083016
GM
2455 if (IS_QLA82XX(ha)) {
2456 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2457 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2458 } else
2459 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2460}
2461
05236a05 2462static void
e315cd28 2463qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2464{
2465 int rval;
2466 uint32_t cnt;
e315cd28 2467 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2468 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2469
6246b8a1 2470 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
05236a05
AV
2471 return;
2472
2473 rval = QLA_SUCCESS;
2474 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2475 RD_REG_DWORD(&reg->iobase_addr);
2476 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2477 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2478 rval == QLA_SUCCESS; cnt--) {
2479 if (cnt) {
2480 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2481 udelay(10);
2482 } else
2483 rval = QLA_FUNCTION_TIMEOUT;
2484 }
2485 if (rval == QLA_SUCCESS)
2486 goto next_test;
2487
2488 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2489 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2490 rval == QLA_SUCCESS; cnt--) {
2491 if (cnt) {
2492 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2493 udelay(10);
2494 } else
2495 rval = QLA_FUNCTION_TIMEOUT;
2496 }
2497 if (rval != QLA_SUCCESS)
2498 goto done;
2499
2500next_test:
2501 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2502 ql_log(ql_log_info, vha, 0x504c,
2503 "Additional code -- 0x55AA.\n");
05236a05
AV
2504
2505done:
2506 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2507 RD_REG_DWORD(&reg->iobase_window);
2508}
2509
9a853f71 2510/**
6246b8a1 2511 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
9a853f71
AV
2512 * @irq:
2513 * @dev_id: SCSI driver HA context
9a853f71
AV
2514 *
2515 * Called by system whenever the host adapter generates an interrupt.
2516 *
2517 * Returns handled flag.
2518 */
2519irqreturn_t
7d12e780 2520qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2521{
e315cd28
AC
2522 scsi_qla_host_t *vha;
2523 struct qla_hw_data *ha;
9a853f71
AV
2524 struct device_reg_24xx __iomem *reg;
2525 int status;
9a853f71
AV
2526 unsigned long iter;
2527 uint32_t stat;
2528 uint32_t hccr;
7d613ac6 2529 uint16_t mb[8];
e315cd28 2530 struct rsp_que *rsp;
43fac4d9 2531 unsigned long flags;
9a853f71 2532
e315cd28
AC
2533 rsp = (struct rsp_que *) dev_id;
2534 if (!rsp) {
3256b435
CD
2535 ql_log(ql_log_info, NULL, 0x5059,
2536 "%s: NULL response queue pointer.\n", __func__);
9a853f71
AV
2537 return IRQ_NONE;
2538 }
2539
e315cd28 2540 ha = rsp->hw;
9a853f71
AV
2541 reg = &ha->iobase->isp24;
2542 status = 0;
2543
85880801
AV
2544 if (unlikely(pci_channel_offline(ha->pdev)))
2545 return IRQ_HANDLED;
2546
43fac4d9 2547 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2548 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2549 for (iter = 50; iter--; ) {
2550 stat = RD_REG_DWORD(&reg->host_status);
2551 if (stat & HSRX_RISC_PAUSED) {
85880801 2552 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2553 break;
2554
9a853f71
AV
2555 hccr = RD_REG_DWORD(&reg->hccr);
2556
7c3df132
SK
2557 ql_log(ql_log_warn, vha, 0x504b,
2558 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2559 hccr);
05236a05 2560
e315cd28 2561 qla2xxx_check_risc_status(vha);
05236a05 2562
e315cd28
AC
2563 ha->isp_ops->fw_dump(vha, 1);
2564 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2565 break;
2566 } else if ((stat & HSRX_RISC_INT) == 0)
2567 break;
2568
2569 switch (stat & 0xff) {
fafbda9f
AE
2570 case INTR_ROM_MB_SUCCESS:
2571 case INTR_ROM_MB_FAILED:
2572 case INTR_MB_SUCCESS:
2573 case INTR_MB_FAILED:
e315cd28 2574 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2575 status |= MBX_INTERRUPT;
2576
2577 break;
fafbda9f 2578 case INTR_ASYNC_EVENT:
9a853f71
AV
2579 mb[0] = MSW(stat);
2580 mb[1] = RD_REG_WORD(&reg->mailbox1);
2581 mb[2] = RD_REG_WORD(&reg->mailbox2);
2582 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2583 qla2x00_async_event(vha, rsp, mb);
9a853f71 2584 break;
fafbda9f
AE
2585 case INTR_RSP_QUE_UPDATE:
2586 case INTR_RSP_QUE_UPDATE_83XX:
2afa19a9 2587 qla24xx_process_response_queue(vha, rsp);
9a853f71 2588 break;
fafbda9f 2589 case INTR_ATIO_QUE_UPDATE:
2d70c103
NB
2590 qlt_24xx_process_atio_queue(vha);
2591 break;
fafbda9f 2592 case INTR_ATIO_RSP_QUE_UPDATE:
2d70c103
NB
2593 qlt_24xx_process_atio_queue(vha);
2594 qla24xx_process_response_queue(vha, rsp);
2595 break;
9a853f71 2596 default:
7c3df132
SK
2597 ql_dbg(ql_dbg_async, vha, 0x504f,
2598 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2599 break;
2600 }
2601 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2602 RD_REG_DWORD_RELAXED(&reg->hccr);
cb860bbd
GM
2603 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2604 ndelay(3500);
9a853f71 2605 }
36439832 2606 qla2x00_handle_mbx_completion(ha, status);
43fac4d9 2607 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71 2608
9a853f71
AV
2609 return IRQ_HANDLED;
2610}
2611
a8488abe
AV
2612static irqreturn_t
2613qla24xx_msix_rsp_q(int irq, void *dev_id)
2614{
e315cd28
AC
2615 struct qla_hw_data *ha;
2616 struct rsp_que *rsp;
a8488abe 2617 struct device_reg_24xx __iomem *reg;
2afa19a9 2618 struct scsi_qla_host *vha;
0f19bc68 2619 unsigned long flags;
a8488abe 2620
e315cd28
AC
2621 rsp = (struct rsp_que *) dev_id;
2622 if (!rsp) {
3256b435
CD
2623 ql_log(ql_log_info, NULL, 0x505a,
2624 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2625 return IRQ_NONE;
2626 }
2627 ha = rsp->hw;
a8488abe
AV
2628 reg = &ha->iobase->isp24;
2629
0f19bc68 2630 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2631
a67093d4 2632 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2633 qla24xx_process_response_queue(vha, rsp);
3155754a 2634 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2635 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2636 RD_REG_DWORD_RELAXED(&reg->hccr);
2637 }
0f19bc68 2638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2639
2640 return IRQ_HANDLED;
2641}
2642
68ca949c
AC
2643static irqreturn_t
2644qla25xx_msix_rsp_q(int irq, void *dev_id)
2645{
2646 struct qla_hw_data *ha;
2647 struct rsp_que *rsp;
3155754a 2648 struct device_reg_24xx __iomem *reg;
0f19bc68 2649 unsigned long flags;
68ca949c
AC
2650
2651 rsp = (struct rsp_que *) dev_id;
2652 if (!rsp) {
3256b435
CD
2653 ql_log(ql_log_info, NULL, 0x505b,
2654 "%s: NULL response queue pointer.\n", __func__);
68ca949c
AC
2655 return IRQ_NONE;
2656 }
2657 ha = rsp->hw;
2658
3155754a 2659 /* Clear the interrupt, if enabled, for this response queue */
d424754c 2660 if (!ha->flags.disable_msix_handshake) {
3155754a 2661 reg = &ha->iobase->isp24;
0f19bc68 2662 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2663 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2664 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2665 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2666 }
68ca949c
AC
2667 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2668
2669 return IRQ_HANDLED;
2670}
2671
a8488abe
AV
2672static irqreturn_t
2673qla24xx_msix_default(int irq, void *dev_id)
2674{
e315cd28
AC
2675 scsi_qla_host_t *vha;
2676 struct qla_hw_data *ha;
2677 struct rsp_que *rsp;
a8488abe
AV
2678 struct device_reg_24xx __iomem *reg;
2679 int status;
a8488abe
AV
2680 uint32_t stat;
2681 uint32_t hccr;
7d613ac6 2682 uint16_t mb[8];
0f19bc68 2683 unsigned long flags;
a8488abe 2684
e315cd28
AC
2685 rsp = (struct rsp_que *) dev_id;
2686 if (!rsp) {
3256b435
CD
2687 ql_log(ql_log_info, NULL, 0x505c,
2688 "%s: NULL response queue pointer.\n", __func__);
e315cd28
AC
2689 return IRQ_NONE;
2690 }
2691 ha = rsp->hw;
a8488abe
AV
2692 reg = &ha->iobase->isp24;
2693 status = 0;
2694
0f19bc68 2695 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2696 vha = pci_get_drvdata(ha->pdev);
87f27015 2697 do {
a8488abe
AV
2698 stat = RD_REG_DWORD(&reg->host_status);
2699 if (stat & HSRX_RISC_PAUSED) {
85880801 2700 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2701 break;
2702
a8488abe
AV
2703 hccr = RD_REG_DWORD(&reg->hccr);
2704
7c3df132
SK
2705 ql_log(ql_log_info, vha, 0x5050,
2706 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2707 hccr);
05236a05 2708
e315cd28 2709 qla2xxx_check_risc_status(vha);
05236a05 2710
e315cd28
AC
2711 ha->isp_ops->fw_dump(vha, 1);
2712 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2713 break;
2714 } else if ((stat & HSRX_RISC_INT) == 0)
2715 break;
2716
2717 switch (stat & 0xff) {
fafbda9f
AE
2718 case INTR_ROM_MB_SUCCESS:
2719 case INTR_ROM_MB_FAILED:
2720 case INTR_MB_SUCCESS:
2721 case INTR_MB_FAILED:
e315cd28 2722 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2723 status |= MBX_INTERRUPT;
2724
2725 break;
fafbda9f 2726 case INTR_ASYNC_EVENT:
a8488abe
AV
2727 mb[0] = MSW(stat);
2728 mb[1] = RD_REG_WORD(&reg->mailbox1);
2729 mb[2] = RD_REG_WORD(&reg->mailbox2);
2730 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2731 qla2x00_async_event(vha, rsp, mb);
a8488abe 2732 break;
fafbda9f
AE
2733 case INTR_RSP_QUE_UPDATE:
2734 case INTR_RSP_QUE_UPDATE_83XX:
2afa19a9 2735 qla24xx_process_response_queue(vha, rsp);
a8488abe 2736 break;
fafbda9f 2737 case INTR_ATIO_QUE_UPDATE:
2d70c103
NB
2738 qlt_24xx_process_atio_queue(vha);
2739 break;
fafbda9f 2740 case INTR_ATIO_RSP_QUE_UPDATE:
2d70c103
NB
2741 qlt_24xx_process_atio_queue(vha);
2742 qla24xx_process_response_queue(vha, rsp);
2743 break;
a8488abe 2744 default:
7c3df132
SK
2745 ql_dbg(ql_dbg_async, vha, 0x5051,
2746 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2747 break;
2748 }
2749 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2750 } while (0);
36439832 2751 qla2x00_handle_mbx_completion(ha, status);
0f19bc68 2752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe 2753
a8488abe
AV
2754 return IRQ_HANDLED;
2755}
2756
2757/* Interrupt handling helpers. */
2758
2759struct qla_init_msix_entry {
a8488abe 2760 const char *name;
476834c2 2761 irq_handler_t handler;
a8488abe
AV
2762};
2763
68ca949c 2764static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2765 { "qla2xxx (default)", qla24xx_msix_default },
2766 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2767 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2768};
2769
a9083016
GM
2770static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2771 { "qla2xxx (default)", qla82xx_msix_default },
2772 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2773};
2774
aa230bc5
AE
2775static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
2776 { "qla2xxx (default)", qla24xx_msix_default },
2777 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2778 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
2779};
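/*
 * Vector roles, as wired up in qla24xx_enable_msix() and
 * qla25xx_request_irq(): entry 0 services general/default interrupts,
 * entry 1 the base response queue, and the third entry either additional
 * response queues (multiqueue) or, on ISP83xx with target mode enabled,
 * the ATIO queue.
 */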
2780
a8488abe 2781static void
e315cd28 2782qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2783{
2784 int i;
2785 struct qla_msix_entry *qentry;
7c3df132 2786 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2787
73208dfd
AC
2788 for (i = 0; i < ha->msix_count; i++) {
2789 qentry = &ha->msix_entries[i];
a8488abe 2790 if (qentry->have_irq)
73208dfd 2791 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2792 }
2793 pci_disable_msix(ha->pdev);
73208dfd
AC
2794 kfree(ha->msix_entries);
2795 ha->msix_entries = NULL;
2796 ha->flags.msix_enabled = 0;
7c3df132
SK
2797 ql_dbg(ql_dbg_init, vha, 0x0042,
2798 "Disabled the MSI.\n");
a8488abe
AV
2799}
2800
2801static int
73208dfd 2802qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2803{
ad038fa8 2804#define MIN_MSIX_COUNT 2
a8488abe 2805 int i, ret;
73208dfd 2806 struct msix_entry *entries;
a8488abe 2807 struct qla_msix_entry *qentry;
7c3df132 2808 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2809
2810 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2811 GFP_KERNEL);
7c3df132
SK
2812 if (!entries) {
2813 ql_log(ql_log_warn, vha, 0x00bc,
2814 "Failed to allocate memory for msix_entry.\n");
73208dfd 2815 return -ENOMEM;
7c3df132 2816 }
a8488abe 2817
73208dfd
AC
2818 for (i = 0; i < ha->msix_count; i++)
2819 entries[i].entry = i;
a8488abe 2820
73208dfd 2821 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2822 if (ret) {
ad038fa8
LC
2823 if (ret < MIN_MSIX_COUNT)
2824 goto msix_failed;
2825
7c3df132
SK
2826 ql_log(ql_log_warn, vha, 0x00c6,
2827 "MSI-X: Failed to enable support "
2828 "-- %d/%d\n Retry with %d vectors.\n",
2829 ha->msix_count, ret, ret);
73208dfd
AC
2830 ha->msix_count = ret;
2831 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2832 if (ret) {
ad038fa8 2833msix_failed:
7c3df132
SK
2834 ql_log(ql_log_fatal, vha, 0x00c7,
2835 "MSI-X: Failed to enable support, "
2836 "giving up -- %d/%d.\n",
2837 ha->msix_count, ret);
73208dfd
AC
2838 goto msix_out;
2839 }
2afa19a9 2840 ha->max_rsp_queues = ha->msix_count - 1;
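		/*
		 * One vector is reserved for the default queue, so with a
		 * reduced vector count the number of response queues is
		 * capped at msix_count - 1.
		 */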
73208dfd
AC
2841 }
2842 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2843 ha->msix_count, GFP_KERNEL);
2844 if (!ha->msix_entries) {
7c3df132
SK
2845 ql_log(ql_log_fatal, vha, 0x00c8,
2846 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2847 ret = -ENOMEM;
a8488abe
AV
2848 goto msix_out;
2849 }
2850 ha->flags.msix_enabled = 1;
2851
73208dfd
AC
2852 for (i = 0; i < ha->msix_count; i++) {
2853 qentry = &ha->msix_entries[i];
2854 qentry->vector = entries[i].vector;
2855 qentry->entry = entries[i].entry;
a8488abe 2856 qentry->have_irq = 0;
73208dfd 2857 qentry->rsp = NULL;
a8488abe
AV
2858 }
2859
2afa19a9 2860 /* Enable MSI-X vectors for the base queue */
aa230bc5 2861 for (i = 0; i < ha->msix_count; i++) {
2afa19a9 2862 qentry = &ha->msix_entries[i];
aa230bc5
AE
2863 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2864 ret = request_irq(qentry->vector,
2865 qla83xx_msix_entries[i].handler,
2866 0, qla83xx_msix_entries[i].name, rsp);
2867 } else if (IS_QLA82XX(ha)) {
a9083016
GM
2868 ret = request_irq(qentry->vector,
2869 qla82xx_msix_entries[i].handler,
2870 0, qla82xx_msix_entries[i].name, rsp);
2871 } else {
2872 ret = request_irq(qentry->vector,
2873 msix_entries[i].handler,
2874 0, msix_entries[i].name, rsp);
2875 }
2afa19a9 2876 if (ret) {
7c3df132
SK
2877 ql_log(ql_log_fatal, vha, 0x00cb,
2878 "MSI-X: unable to register handler -- %x/%d.\n",
2879 qentry->vector, ret);
2afa19a9
AC
2880 qla24xx_disable_msix(ha);
2881 ha->mqenable = 0;
2882 goto msix_out;
2883 }
2884 qentry->have_irq = 1;
2885 qentry->rsp = rsp;
2886 rsp->msix = qentry;
73208dfd 2887 }
73208dfd
AC
2888
2889 /* Enable MSI-X vector for response queue update for queue 0 */
6246b8a1
GM
2890 if (IS_QLA83XX(ha)) {
2891 if (ha->msixbase && ha->mqiobase &&
2892 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2893 ha->mqenable = 1;
 2894 } else if (ha->mqiobase &&
 2895 (ha->max_rsp_queues > 1 ||
 2896 ha->max_req_queues > 1))
2897 ha->mqenable = 1;
7c3df132
SK
2898 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2899 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2900 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2901 ql_dbg(ql_dbg_init, vha, 0x0055,
2902 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2903 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2904
a8488abe 2905msix_out:
73208dfd 2906 kfree(entries);
a8488abe
AV
2907 return ret;
2908}
2909
2910int
73208dfd 2911qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2912{
2913 int ret;
963b0fdd 2914 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2915 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2916
2917 /* If possible, enable MSI-X. */
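	/*
	 * Interrupt setup falls back in order: MSI-X, then MSI, then legacy
	 * INTx. Boards flagged early (for example the listed HP ISP2432
	 * subsystem IDs) drop straight to INTx, and INTx itself is refused
	 * on ISP82xx parts.
	 */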
6246b8a1 2918 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
8ae6d9c7 2919 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
6377a7ae
BH
2920 goto skip_msi;
2921
2922 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2923 (ha->pdev->subsystem_device == 0x7040 ||
2924 ha->pdev->subsystem_device == 0x7041 ||
2925 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2926 ql_log(ql_log_warn, vha, 0x0034,
2927 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2928 ha->pdev->subsystem_vendor,
7c3df132 2929 ha->pdev->subsystem_device);
6377a7ae
BH
2930 goto skip_msi;
2931 }
a8488abe 2932
42cd4f5d 2933 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
7c3df132
SK
2934 ql_log(ql_log_warn, vha, 0x0035,
2935 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
42cd4f5d 2936 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
a8488abe
AV
2937 goto skip_msix;
2938 }
2939
73208dfd 2940 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2941 if (!ret) {
7c3df132
SK
2942 ql_dbg(ql_dbg_init, vha, 0x0036,
2943 "MSI-X: Enabled (0x%X, 0x%X).\n",
2944 ha->chip_revision, ha->fw_attributes);
963b0fdd 2945 goto clear_risc_ints;
a8488abe 2946 }
7c3df132
SK
2947 ql_log(ql_log_info, vha, 0x0037,
2948 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2949skip_msix:
cbedb601 2950
3a03eb79 2951 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
8ae6d9c7 2952 !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
cbedb601
AV
2953 goto skip_msi;
2954
2955 ret = pci_enable_msi(ha->pdev);
2956 if (!ret) {
7c3df132
SK
2957 ql_dbg(ql_dbg_init, vha, 0x0038,
2958 "MSI: Enabled.\n");
cbedb601 2959 ha->flags.msi_enabled = 1;
a9083016 2960 } else
7c3df132
SK
2961 ql_log(ql_log_warn, vha, 0x0039,
2962 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
a033b655
GM
2963
2964 /* Skip INTx on ISP82xx. */
2965 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2966 return QLA_FUNCTION_FAILED;
2967
cbedb601
AV
2968skip_msi:
2969
fd34f556 2970 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2971 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2972 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2973 if (ret) {
7c3df132 2974 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2975 "Failed to reserve interrupt %d already in use.\n",
2976 ha->pdev->irq);
963b0fdd 2977 goto fail;
8ae6d9c7 2978 } else if (!ha->flags.msi_enabled) {
68d91cbd
SK
2979 ql_dbg(ql_dbg_init, vha, 0x0125,
2980 "INTa mode: Enabled.\n");
8ae6d9c7
GM
2981 ha->flags.mr_intr_valid = 1;
2982 }
7992abfc 2983
963b0fdd
AV
2984clear_risc_ints:
2985
c6952483 2986 spin_lock_irq(&ha->hardware_lock);
c1114953 2987 if (!IS_FWI2_CAPABLE(ha))
963b0fdd 2988 WRT_REG_WORD(&reg->isp.semaphore, 0);
c6952483 2989 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2990
963b0fdd 2991fail:
a8488abe
AV
2992 return ret;
2993}
2994
2995void
e315cd28 2996qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2997{
e315cd28 2998 struct qla_hw_data *ha = vha->hw;
9a347ff4
CD
2999 struct rsp_que *rsp;
3000
3001 /*
3002 * We need to check that ha->rsp_q_map is valid in case we are called
3003 * from a probe failure context.
3004 */
3005 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3006 return;
3007 rsp = ha->rsp_q_map[0];
a8488abe
AV
3008
3009 if (ha->flags.msix_enabled)
3010 qla24xx_disable_msix(ha);
90a86fc0 3011 else if (ha->flags.msi_enabled) {
e315cd28 3012 free_irq(ha->pdev->irq, rsp);
cbedb601 3013 pci_disable_msi(ha->pdev);
90a86fc0
JC
3014 } else
3015 free_irq(ha->pdev->irq, rsp);
a8488abe 3016}
e315cd28 3017
73208dfd
AC
3018
3019int qla25xx_request_irq(struct rsp_que *rsp)
3020{
3021 struct qla_hw_data *ha = rsp->hw;
2afa19a9 3022 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 3023 struct qla_msix_entry *msix = rsp->msix;
7c3df132 3024 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
3025 int ret;
3026
3027 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3028 if (ret) {
7c3df132
SK
3029 ql_log(ql_log_fatal, vha, 0x00e6,
3030 "MSI-X: Unable to register handler -- %x/%d.\n",
3031 msix->vector, ret);
73208dfd
AC
3032 return ret;
3033 }
3034 msix->have_irq = 1;
3035 msix->rsp = rsp;
3036 return ret;
3037}