[SCSI] qla2xxx: T10 DIF - Fix incorrect error reporting.
drivers/scsi/qla2xxx/qla_isr.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
32irqreturn_t
7d12e780 33qla2100_intr_handler(int irq, void *dev_id)
1da177e4 34{
e315cd28
AC
35 scsi_qla_host_t *vha;
36 struct qla_hw_data *ha;
3d71644c 37 struct device_reg_2xxx __iomem *reg;
1da177e4 38 int status;
1da177e4 39 unsigned long iter;
14e660e6 40 uint16_t hccr;
9a853f71 41 uint16_t mb[4];
e315cd28 42 struct rsp_que *rsp;
43fac4d9 43 unsigned long flags;
1da177e4 44
e315cd28
AC
45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) {
1da177e4 47 printk(KERN_INFO
7c3df132 48 "%s(): NULL response queue pointer.\n", __func__);
1da177e4
LT
49 return (IRQ_NONE);
50 }
51
e315cd28 52 ha = rsp->hw;
3d71644c 53 reg = &ha->iobase->isp;
1da177e4
LT
54 status = 0;
55
43fac4d9 56 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 57 vha = pci_get_drvdata(ha->pdev);
1da177e4 58 for (iter = 50; iter--; ) {
14e660e6
SJ
59 hccr = RD_REG_WORD(&reg->hccr);
60 if (hccr & HCCR_RISC_PAUSE) {
61 if (pci_channel_offline(ha->pdev))
62 break;
63
64 /*
65 * Issue a "HARD" reset in order for the RISC interrupt
a06a0f8e 66 * bit to be cleared. Schedule a big hammer to get
14e660e6
SJ
67 * out of the RISC PAUSED state.
68 */
69 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
70 RD_REG_WORD(&reg->hccr);
71
e315cd28
AC
72 ha->isp_ops->fw_dump(vha, 1);
73 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
74 break;
75 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
76 break;
77
78 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
79 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
80 RD_REG_WORD(&reg->hccr);
81
82 /* Get mailbox data. */
9a853f71
AV
83 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
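			/*
			 * The range checks below distinguish mailbox command
			 * completion status codes (0x4000-0x7fff) from
			 * asynchronous event codes (0x8000-0xbfff); anything
			 * else is logged as an unrecognized interrupt type.
			 */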
84 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 85 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 86 status |= MBX_INTERRUPT;
9a853f71
AV
87 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
88 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
89 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
90 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 91 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
92 } else {
93 /*EMPTY*/
7c3df132
SK
94 ql_dbg(ql_dbg_async, vha, 0x5025,
95 "Unrecognized interrupt type (%d).\n",
96 mb[0]);
1da177e4
LT
97 }
98 /* Release mailbox registers. */
99 WRT_REG_WORD(&reg->semaphore, 0);
100 RD_REG_WORD(&reg->semaphore);
101 } else {
73208dfd 102 qla2x00_process_response_queue(rsp);
1da177e4
LT
103
104 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
105 RD_REG_WORD(&reg->hccr);
106 }
107 }
43fac4d9 108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 109
1da177e4
LT
110 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
111 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 112 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 113 complete(&ha->mbx_intr_comp);
1da177e4
LT
114 }
115
1da177e4
LT
116 return (IRQ_HANDLED);
117}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
128irqreturn_t
7d12e780 129qla2300_intr_handler(int irq, void *dev_id)
1da177e4 130{
e315cd28 131 scsi_qla_host_t *vha;
3d71644c 132 struct device_reg_2xxx __iomem *reg;
1da177e4 133 int status;
1da177e4
LT
134 unsigned long iter;
135 uint32_t stat;
1da177e4 136 uint16_t hccr;
9a853f71 137 uint16_t mb[4];
e315cd28
AC
138 struct rsp_que *rsp;
139 struct qla_hw_data *ha;
43fac4d9 140 unsigned long flags;
1da177e4 141
e315cd28
AC
142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) {
1da177e4 144 printk(KERN_INFO
7c3df132 145 "%s(): NULL response queue pointer.\n", __func__);
1da177e4
LT
146 return (IRQ_NONE);
147 }
148
e315cd28 149 ha = rsp->hw;
3d71644c 150 reg = &ha->iobase->isp;
1da177e4
LT
151 status = 0;
152
43fac4d9 153 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 154 vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
155 for (iter = 50; iter--; ) {
156 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
157 if (stat & HSR_RISC_PAUSED) {
85880801 158 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
159 break;
160
1da177e4
LT
161 hccr = RD_REG_WORD(&reg->hccr);
162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
7c3df132
SK
163 ql_log(ql_log_warn, vha, 0x5026,
164 "Parity error -- HCCR=%x, Dumping "
165 "firmware.\n", hccr);
1da177e4 166 else
7c3df132
SK
167 ql_log(ql_log_warn, vha, 0x5027,
168 "RISC paused -- HCCR=%x, Dumping "
169 "firmware.\n", hccr);
1da177e4
LT
170
171 /*
172 * Issue a "HARD" reset in order for the RISC
173 * interrupt bit to be cleared. Schedule a big
a06a0f8e 174 * hammer to get out of the RISC PAUSED state.
1da177e4
LT
175 */
176 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
177 RD_REG_WORD(&reg->hccr);
07f31805 178
e315cd28
AC
179 ha->isp_ops->fw_dump(vha, 1);
180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
181 break;
182 } else if ((stat & HSR_RISC_INT) == 0)
183 break;
184
1da177e4 185 switch (stat & 0xff) {
1da177e4
LT
186 case 0x1:
187 case 0x2:
188 case 0x10:
189 case 0x11:
e315cd28 190 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
191 status |= MBX_INTERRUPT;
192
193 /* Release mailbox registers. */
194 WRT_REG_WORD(&reg->semaphore, 0);
195 break;
196 case 0x12:
9a853f71
AV
197 mb[0] = MSW(stat);
198 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
199 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
200 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 201 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
202 break;
203 case 0x13:
73208dfd 204 qla2x00_process_response_queue(rsp);
1da177e4
LT
205 break;
206 case 0x15:
9a853f71
AV
207 mb[0] = MBA_CMPLT_1_16BIT;
208 mb[1] = MSW(stat);
73208dfd 209 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
210 break;
211 case 0x16:
9a853f71
AV
212 mb[0] = MBA_SCSI_COMPLETION;
213 mb[1] = MSW(stat);
214 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 215 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
216 break;
217 default:
7c3df132
SK
218 ql_dbg(ql_dbg_async, vha, 0x5028,
219 "Unrecognized interrupt type (%d).\n", stat & 0xff);
1da177e4
LT
220 break;
221 }
222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
223 RD_REG_WORD_RELAXED(&reg->hccr);
224 }
43fac4d9 225 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 226
1da177e4
LT
227 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
228 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 229 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 230 complete(&ha->mbx_intr_comp);
1da177e4
LT
231 }
232
1da177e4
LT
233 return (IRQ_HANDLED);
234}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
241static void
e315cd28 242qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
243{
244 uint16_t cnt;
245 uint16_t __iomem *wptr;
e315cd28 246 struct qla_hw_data *ha = vha->hw;
3d71644c 247 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
248
249 /* Load return mailbox registers. */
250 ha->flags.mbox_int = 1;
251 ha->mailbox_out[0] = mb0;
252 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
253
254 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 255 if (IS_QLA2200(ha) && cnt == 8)
1da177e4
LT
256 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
257 if (cnt == 4 || cnt == 5)
258 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
259 else
260 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 261
1da177e4
LT
262 wptr++;
263 }
264
265 if (ha->mcp) {
7c3df132
SK
266 ql_dbg(ql_dbg_async, vha, 0x5000,
267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
1da177e4 268 } else {
7c3df132
SK
269 ql_dbg(ql_dbg_async, vha, 0x5001,
270 "MBX pointer ERROR.\n");
1da177e4
LT
271 }
272}
273
8a659571
AV
274static void
275qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
276{
277 static char *event[] =
278 { "Complete", "Request Notification", "Time Extension" };
279 int rval;
280 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
281 uint16_t __iomem *wptr;
282 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
283
284 /* Seed data -- mailbox1 -> mailbox7. */
285 wptr = (uint16_t __iomem *)&reg24->mailbox1;
286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
287 mb[cnt] = RD_REG_WORD(wptr);
288
	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
8a659571
AV
294
295 /* Acknowledgement needed? [Notify && non-zero timeout]. */
296 timeout = (descr >> 8) & 0xf;
297 if (aen != MBA_IDC_NOTIFY || !timeout)
298 return;
299
	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    event[aen & 0xff], timeout);
8a659571
AV
303
304 rval = qla2x00_post_idc_ack_work(vha, mb);
305 if (rval != QLA_SUCCESS)
7c3df132 306 ql_log(ql_log_warn, vha, 0x5023,
8a659571
AV
307 "IDC failed to post ACK.\n");
308}
309
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
2c3dfe3f 315void
73208dfd 316qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 317{
9a853f71 318#define LS_UNKNOWN 2
3a03eb79 319 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
1da177e4 320 char *link_speed;
1da177e4 321 uint16_t handle_cnt;
bdab23da 322 uint16_t cnt, mbx;
1da177e4 323 uint32_t handles[5];
e315cd28 324 struct qla_hw_data *ha = vha->hw;
3d71644c 325 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
bdab23da 326 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
bc5c2aad 327 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1da177e4
LT
328 uint32_t rscn_entry, host_pid;
329 uint8_t rscn_queue_index;
4d4df193 330 unsigned long flags;
1da177e4
LT
331
332 /* Setup to process RIO completion. */
333 handle_cnt = 0;
a9083016 334 if (IS_QLA8XXX_TYPE(ha))
3a03eb79 335 goto skip_rio;
1da177e4
LT
336 switch (mb[0]) {
337 case MBA_SCSI_COMPLETION:
9a853f71 338 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
339 handle_cnt = 1;
340 break;
341 case MBA_CMPLT_1_16BIT:
9a853f71 342 handles[0] = mb[1];
1da177e4
LT
343 handle_cnt = 1;
344 mb[0] = MBA_SCSI_COMPLETION;
345 break;
346 case MBA_CMPLT_2_16BIT:
9a853f71
AV
347 handles[0] = mb[1];
348 handles[1] = mb[2];
1da177e4
LT
349 handle_cnt = 2;
350 mb[0] = MBA_SCSI_COMPLETION;
351 break;
352 case MBA_CMPLT_3_16BIT:
9a853f71
AV
353 handles[0] = mb[1];
354 handles[1] = mb[2];
355 handles[2] = mb[3];
1da177e4
LT
356 handle_cnt = 3;
357 mb[0] = MBA_SCSI_COMPLETION;
358 break;
359 case MBA_CMPLT_4_16BIT:
9a853f71
AV
360 handles[0] = mb[1];
361 handles[1] = mb[2];
362 handles[2] = mb[3];
1da177e4
LT
363 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
364 handle_cnt = 4;
365 mb[0] = MBA_SCSI_COMPLETION;
366 break;
367 case MBA_CMPLT_5_16BIT:
9a853f71
AV
368 handles[0] = mb[1];
369 handles[1] = mb[2];
370 handles[2] = mb[3];
1da177e4
LT
371 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
372 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
373 handle_cnt = 5;
374 mb[0] = MBA_SCSI_COMPLETION;
375 break;
376 case MBA_CMPLT_2_32BIT:
9a853f71 377 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
378 handles[1] = le32_to_cpu(
379 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
380 RD_MAILBOX_REG(ha, reg, 6));
381 handle_cnt = 2;
382 mb[0] = MBA_SCSI_COMPLETION;
383 break;
384 default:
385 break;
386 }
3a03eb79 387skip_rio:
1da177e4
LT
388 switch (mb[0]) {
389 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 390 if (!vha->flags.online)
1da177e4
LT
391 break;
392
393 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
394 qla2x00_process_completed_request(vha, rsp->req,
395 handles[cnt]);
1da177e4
LT
396 break;
397
398 case MBA_RESET: /* Reset */
7c3df132
SK
399 ql_dbg(ql_dbg_async, vha, 0x5002,
400 "Asynchronous RESET.\n");
1da177e4 401
e315cd28 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
403 break;
404
405 case MBA_SYSTEM_ERR: /* System Error */
bdab23da 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
7c3df132 407 ql_log(ql_log_warn, vha, 0x5003,
bdab23da
AV
408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
1da177e4 410
e315cd28 411 ha->isp_ops->fw_dump(vha, 1);
1da177e4 412
e428924c 413 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 414 if (mb[1] == 0 && mb[2] == 0) {
7c3df132 415 ql_log(ql_log_fatal, vha, 0x5004,
9a853f71
AV
416 "Unrecoverable Hardware Error: adapter "
417 "marked OFFLINE!\n");
e315cd28 418 vha->flags.online = 0;
b1d46989 419 } else {
25985edc 420 /* Check to see if MPI timeout occurred */
b1d46989
MI
421 if ((mbx & MBX_3) && (ha->flags.port0))
422 set_bit(MPI_RESET_NEEDED,
423 &vha->dpc_flags);
424
e315cd28 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
b1d46989 426 }
9a853f71 427 } else if (mb[1] == 0) {
7c3df132 428 ql_log(ql_log_fatal, vha, 0x5005,
1da177e4
LT
429 "Unrecoverable Hardware Error: adapter marked "
430 "OFFLINE!\n");
e315cd28 431 vha->flags.online = 0;
1da177e4 432 } else
e315cd28 433 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
434 break;
435
436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7c3df132
SK
437 ql_log(ql_log_warn, vha, 0x5006,
438 "ISP Request Transfer Error (%x).\n", mb[1]);
1da177e4 439
e315cd28 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
441 break;
442
443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7c3df132
SK
444 ql_log(ql_log_warn, vha, 0x5007,
445 "ISP Response Transfer Error.\n");
1da177e4 446
e315cd28 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
448 break;
449
450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
7c3df132
SK
451 ql_dbg(ql_dbg_async, vha, 0x5008,
452 "Asynchronous WAKEUP_THRES.\n");
1da177e4
LT
453 break;
454
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
7c3df132
SK
456 ql_log(ql_log_info, vha, 0x5009,
457 "LIP occurred (%x).\n", mb[1]);
1da177e4 458
e315cd28
AC
459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
460 atomic_set(&vha->loop_state, LOOP_DOWN);
461 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
462 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
463 }
464
e315cd28
AC
465 if (vha->vp_idx) {
466 atomic_set(&vha->vp_state, VP_FAILED);
467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
468 }
469
e315cd28
AC
470 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
471 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 472
e315cd28
AC
473 vha->flags.management_server_logged_in = 0;
474 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
475 break;
476
477 case MBA_LOOP_UP: /* Loop Up Event */
1da177e4
LT
478 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
479 link_speed = link_speeds[0];
d8b45213 480 ha->link_data_rate = PORT_SPEED_1GB;
1da177e4 481 } else {
9a853f71 482 link_speed = link_speeds[LS_UNKNOWN];
1da177e4
LT
483 if (mb[1] < 5)
484 link_speed = link_speeds[mb[1]];
3a03eb79
AV
485 else if (mb[1] == 0x13)
486 link_speed = link_speeds[5];
1da177e4
LT
487 ha->link_data_rate = mb[1];
488 }
489
7c3df132
SK
490 ql_log(ql_log_info, vha, 0x500a,
491 "LOOP UP detected (%s Gbps).\n", link_speed);
1da177e4 492
e315cd28
AC
493 vha->flags.management_server_logged_in = 0;
494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
495 break;
496
497 case MBA_LOOP_DOWN: /* Loop Down Event */
bdab23da 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
bc5c2aad 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
7c3df132
SK
500 ql_log(ql_log_info, vha, 0x500b,
501 "LOOP DOWN detected (%x %x %x %x).\n",
502 mb[1], mb[2], mb[3], mbx);
1da177e4 503
e315cd28
AC
504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
505 atomic_set(&vha->loop_state, LOOP_DOWN);
506 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
507 vha->device_flags |= DFLG_NO_CABLE;
508 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
509 }
510
e315cd28
AC
511 if (vha->vp_idx) {
512 atomic_set(&vha->vp_state, VP_FAILED);
513 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
514 }
515
e315cd28 516 vha->flags.management_server_logged_in = 0;
d8b45213 517 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 518 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
519 break;
520
521 case MBA_LIP_RESET: /* LIP reset occurred */
7c3df132 522 ql_log(ql_log_info, vha, 0x500c,
cc3ef7bc 523 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 524
e315cd28
AC
525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
526 atomic_set(&vha->loop_state, LOOP_DOWN);
527 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
528 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
529 }
530
e315cd28
AC
531 if (vha->vp_idx) {
532 atomic_set(&vha->vp_state, VP_FAILED);
533 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
534 }
535
e315cd28 536 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
537
538 ha->operating_mode = LOOP;
e315cd28
AC
539 vha->flags.management_server_logged_in = 0;
540 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
541 break;
542
3a03eb79 543 /* case MBA_DCBX_COMPLETE: */
1da177e4
LT
544 case MBA_POINT_TO_POINT: /* Point-to-Point */
545 if (IS_QLA2100(ha))
546 break;
547
23f2ebd1 548 if (IS_QLA8XXX_TYPE(ha)) {
7c3df132
SK
549 ql_dbg(ql_dbg_async, vha, 0x500d,
550 "DCBX Completed -- %04x %04x %04x.\n",
551 mb[1], mb[2], mb[3]);
23f2ebd1
SR
552 if (ha->notify_dcbx_comp)
553 complete(&ha->dcbx_comp);
554
555 } else
7c3df132
SK
556 ql_dbg(ql_dbg_async, vha, 0x500e,
557 "Asynchronous P2P MODE received.\n");
1da177e4
LT
558
559 /*
560 * Until there's a transition from loop down to loop up, treat
561 * this as loop down only.
562 */
e315cd28
AC
563 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
564 atomic_set(&vha->loop_state, LOOP_DOWN);
565 if (!atomic_read(&vha->loop_down_timer))
566 atomic_set(&vha->loop_down_timer,
1da177e4 567 LOOP_DOWN_TIME);
e315cd28 568 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
569 }
570
e315cd28
AC
571 if (vha->vp_idx) {
572 atomic_set(&vha->vp_state, VP_FAILED);
573 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
574 }
575
e315cd28
AC
576 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
577 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
578
579 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
580 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
581
582 ha->flags.gpsc_supported = 1;
e315cd28 583 vha->flags.management_server_logged_in = 0;
1da177e4
LT
584 break;
585
586 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
587 if (IS_QLA2100(ha))
588 break;
589
7c3df132 590 ql_log(ql_log_info, vha, 0x500f,
1da177e4
LT
591 "Configuration change detected: value=%x.\n", mb[1]);
592
e315cd28
AC
593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
594 atomic_set(&vha->loop_state, LOOP_DOWN);
595 if (!atomic_read(&vha->loop_down_timer))
596 atomic_set(&vha->loop_down_timer,
1da177e4 597 LOOP_DOWN_TIME);
e315cd28 598 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
599 }
600
e315cd28
AC
601 if (vha->vp_idx) {
602 atomic_set(&vha->vp_state, VP_FAILED);
603 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
604 }
605
e315cd28
AC
606 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
607 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
608 break;
609
610 case MBA_PORT_UPDATE: /* Port database update */
55903b9d
SV
611 /*
612 * Handle only global and vn-port update events
613 *
614 * Relevant inputs:
615 * mb[1] = N_Port handle of changed port
616 * OR 0xffff for global event
617 * mb[2] = New login state
618 * 7 = Port logged out
619 * mb[3] = LSB is vp_idx, 0xff = all vps
620 *
621 * Skip processing if:
622 * Event is global, vp_idx is NOT all vps,
623 * vp_idx does not match
624 * Event is not global, vp_idx does not match
625 */
12cec63e
AV
626 if (IS_QLA2XXX_MIDTYPE(ha) &&
627 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
628 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
629 break;
73208dfd 630
9764ff88
AV
631 /* Global event -- port logout or port unavailable. */
632 if (mb[1] == 0xffff && mb[2] == 0x7) {
7c3df132
SK
633 ql_dbg(ql_dbg_async, vha, 0x5010,
634 "Port unavailable %04x %04x %04x.\n",
635 mb[1], mb[2], mb[3]);
9764ff88
AV
636
637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
638 atomic_set(&vha->loop_state, LOOP_DOWN);
639 atomic_set(&vha->loop_down_timer,
640 LOOP_DOWN_TIME);
641 vha->device_flags |= DFLG_NO_CABLE;
642 qla2x00_mark_all_devices_lost(vha, 1);
643 }
644
645 if (vha->vp_idx) {
646 atomic_set(&vha->vp_state, VP_FAILED);
647 fc_vport_set_state(vha->fc_vport,
648 FC_VPORT_FAILED);
faadc5e7 649 qla2x00_mark_all_devices_lost(vha, 1);
9764ff88
AV
650 }
651
652 vha->flags.management_server_logged_in = 0;
653 ha->link_data_rate = PORT_SPEED_UNKNOWN;
654 break;
655 }
656
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
e315cd28
AC
662 atomic_set(&vha->loop_down_timer, 0);
663 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
664 atomic_read(&vha->loop_state) != LOOP_DEAD) {
7c3df132
SK
665 ql_dbg(ql_dbg_async, vha, 0x5011,
666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
667 mb[1], mb[2], mb[3]);
1da177e4
LT
668 break;
669 }
670
7c3df132
SK
671 ql_dbg(ql_dbg_async, vha, 0x5012,
672 "Port database changed %04x %04x %04x.\n",
673 mb[1], mb[2], mb[3]);
1da177e4
LT
674
675 /*
676 * Mark all devices as missing so we will login again.
677 */
e315cd28 678 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 679
e315cd28 680 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 681
e315cd28 682 vha->flags.rscn_queue_overflow = 1;
1da177e4 683
e315cd28
AC
684 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
685 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
686 break;
687
688 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 689 /* Check if the Vport has issued a SCR */
e315cd28 690 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
691 break;
692 /* Only handle SCNs for our Vport index. */
0d6e61bc 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
3c397400 694 break;
0d6e61bc 695
7c3df132
SK
696 ql_dbg(ql_dbg_async, vha, 0x5013,
697 "RSCN database changed -- %04x %04x %04x.\n",
698 mb[1], mb[2], mb[3]);
1da177e4 699
59d72d87 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
702 | vha->d_id.b.al_pa;
1da177e4 703 if (rscn_entry == host_pid) {
7c3df132
SK
704 ql_dbg(ql_dbg_async, vha, 0x5014,
705 "Ignoring RSCN update to local host "
706 "port ID (%06x).\n", host_pid);
1da177e4
LT
707 break;
708 }
709
59d72d87
RA
710 /* Ignore reserved bits from RSCN-payload. */
711 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
e315cd28 712 rscn_queue_index = vha->rscn_in_ptr + 1;
1da177e4
LT
713 if (rscn_queue_index == MAX_RSCN_COUNT)
714 rscn_queue_index = 0;
e315cd28
AC
715 if (rscn_queue_index != vha->rscn_out_ptr) {
716 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
717 vha->rscn_in_ptr = rscn_queue_index;
1da177e4 718 } else {
e315cd28 719 vha->flags.rscn_queue_overflow = 1;
1da177e4
LT
720 }
721
e315cd28
AC
722 atomic_set(&vha->loop_state, LOOP_UPDATE);
723 atomic_set(&vha->loop_down_timer, 0);
724 vha->flags.management_server_logged_in = 0;
1da177e4 725
e315cd28
AC
726 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
727 set_bit(RSCN_UPDATE, &vha->dpc_flags);
728 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
729 break;
730
731 /* case MBA_RIO_RESPONSE: */
732 case MBA_ZIO_RESPONSE:
7c3df132
SK
733 ql_dbg(ql_dbg_async, vha, 0x5015,
734 "[R|Z]IO update completion.\n");
1da177e4 735
e428924c 736 if (IS_FWI2_CAPABLE(ha))
2afa19a9 737 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 738 else
73208dfd 739 qla2x00_process_response_queue(rsp);
1da177e4 740 break;
9a853f71
AV
741
742 case MBA_DISCARD_RND_FRAME:
7c3df132
SK
743 ql_dbg(ql_dbg_async, vha, 0x5016,
744 "Discard RND Frame -- %04x %04x %04x.\n",
745 mb[1], mb[2], mb[3]);
9a853f71 746 break;
45ebeb56
AV
747
748 case MBA_TRACE_NOTIFICATION:
7c3df132
SK
749 ql_dbg(ql_dbg_async, vha, 0x5017,
750 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
45ebeb56 751 break;
4d4df193
HK
752
753 case MBA_ISP84XX_ALERT:
7c3df132
SK
754 ql_dbg(ql_dbg_async, vha, 0x5018,
755 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]);
4d4df193
HK
757
758 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
759 switch (mb[1]) {
760 case A84_PANIC_RECOVERY:
7c3df132
SK
761 ql_log(ql_log_info, vha, 0x5019,
762 "Alert 84XX: panic recovery %04x %04x.\n",
763 mb[2], mb[3]);
4d4df193
HK
764 break;
765 case A84_OP_LOGIN_COMPLETE:
766 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
767 ql_log(ql_log_info, vha, 0x501a,
768 "Alert 84XX: firmware version %x.\n",
769 ha->cs84xx->op_fw_version);
4d4df193
HK
770 break;
771 case A84_DIAG_LOGIN_COMPLETE:
772 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
7c3df132
SK
773 ql_log(ql_log_info, vha, 0x501b,
774 "Alert 84XX: diagnostic firmware version %x.\n",
775 ha->cs84xx->diag_fw_version);
4d4df193
HK
776 break;
777 case A84_GOLD_LOGIN_COMPLETE:
778 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
779 ha->cs84xx->fw_update = 1;
7c3df132
SK
780 ql_log(ql_log_info, vha, 0x501c,
781 "Alert 84XX: gold firmware version %x.\n",
782 ha->cs84xx->gold_fw_version);
4d4df193
HK
783 break;
784 default:
7c3df132
SK
785 ql_log(ql_log_warn, vha, 0x501d,
786 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
4d4df193
HK
787 mb[1], mb[2], mb[3]);
788 }
789 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
790 break;
3a03eb79 791 case MBA_DCBX_START:
7c3df132
SK
792 ql_dbg(ql_dbg_async, vha, 0x501e,
793 "DCBX Started -- %04x %04x %04x.\n",
794 mb[1], mb[2], mb[3]);
3a03eb79
AV
795 break;
796 case MBA_DCBX_PARAM_UPDATE:
7c3df132
SK
797 ql_dbg(ql_dbg_async, vha, 0x501f,
798 "DCBX Parameters Updated -- %04x %04x %04x.\n",
799 mb[1], mb[2], mb[3]);
3a03eb79
AV
800 break;
801 case MBA_FCF_CONF_ERR:
7c3df132
SK
802 ql_dbg(ql_dbg_async, vha, 0x5020,
803 "FCF Configuration Error -- %04x %04x %04x.\n",
804 mb[1], mb[2], mb[3]);
3a03eb79
AV
805 break;
806 case MBA_IDC_COMPLETE:
3a03eb79 807 case MBA_IDC_NOTIFY:
3a03eb79 808 case MBA_IDC_TIME_EXT:
8a659571 809 qla81xx_idc_event(vha, mb[0], mb[1]);
3a03eb79 810 break;
1da177e4 811 }
2c3dfe3f 812
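	/*
	 * Forward the event to the NPIV virtual ports as well; only the
	 * physical port (vp_idx 0) does this, and only when virtual hosts
	 * actually exist.
	 */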
e315cd28 813 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 814 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
815}
816
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
822static void
73208dfd
AC
823qla2x00_process_completed_request(struct scsi_qla_host *vha,
824 struct req_que *req, uint32_t index)
1da177e4
LT
825{
826 srb_t *sp;
e315cd28 827 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
828
829 /* Validate handle. */
830 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
831 ql_log(ql_log_warn, vha, 0x3014,
832 "Invalid SCSI command index (%x).\n", index);
1da177e4 833
8f7daead
GM
834 if (IS_QLA82XX(ha))
835 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
836 else
837 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
838 return;
839 }
840
e315cd28 841 sp = req->outstanding_cmds[index];
1da177e4
LT
842 if (sp) {
843 /* Free outstanding command slot. */
e315cd28 844 req->outstanding_cmds[index] = NULL;
1da177e4 845
1da177e4
LT
846 /* Save ISP completion status */
847 sp->cmd->result = DID_OK << 16;
73208dfd 848 qla2x00_sp_compl(ha, sp);
1da177e4 849 } else {
7c3df132 850 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1da177e4 851
8f7daead
GM
852 if (IS_QLA82XX(ha))
853 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
854 else
855 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
856 }
857}
858
ac280b67
AV
859static srb_t *
860qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
861 struct req_que *req, void *iocb)
862{
863 struct qla_hw_data *ha = vha->hw;
864 sts_entry_t *pkt = iocb;
865 srb_t *sp = NULL;
866 uint16_t index;
867
868 index = LSW(pkt->handle);
869 if (index >= MAX_OUTSTANDING_COMMANDS) {
7c3df132
SK
870 ql_log(ql_log_warn, vha, 0x5031,
871 "Invalid command index (%x).\n", index);
8f7daead
GM
872 if (IS_QLA82XX(ha))
873 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
874 else
875 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ac280b67
AV
876 goto done;
877 }
878 sp = req->outstanding_cmds[index];
879 if (!sp) {
7c3df132
SK
880 ql_log(ql_log_warn, vha, 0x5032,
881 "Invalid completion handle (%x) -- timed-out.\n", index);
ac280b67
AV
882 return sp;
883 }
884 if (sp->handle != index) {
7c3df132
SK
885 ql_log(ql_log_warn, vha, 0x5033,
886 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
ac280b67
AV
887 return NULL;
888 }
9a069e19 889
ac280b67 890 req->outstanding_cmds[index] = NULL;
9a069e19 891
ac280b67
AV
892done:
893 return sp;
894}
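
/*
 * Note: qla2x00_get_sp_from_handle() is the common handle-validation helper
 * for the IOCB completion routines below; it clears the outstanding-command
 * slot on success and returns NULL for stale or mismatched handles (after
 * logging a warning), in which case the callers simply return.
 */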
895
896static void
897qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
898 struct mbx_entry *mbx)
899{
900 const char func[] = "MBX-IOCB";
901 const char *type;
ac280b67
AV
902 fc_port_t *fcport;
903 srb_t *sp;
4916392b
MI
904 struct srb_iocb *lio;
905 struct srb_ctx *ctx;
99b0bec7 906 uint16_t *data;
5ff1d584 907 uint16_t status;
ac280b67
AV
908
909 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
910 if (!sp)
911 return;
912
4916392b
MI
913 ctx = sp->ctx;
914 lio = ctx->u.iocb_cmd;
915 type = ctx->name;
ac280b67 916 fcport = sp->fcport;
4916392b 917 data = lio->u.logio.data;
ac280b67 918
5ff1d584 919 data[0] = MBS_COMMAND_ERROR;
4916392b 920 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 921 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 922 if (mbx->entry_status) {
7c3df132
SK
923 ql_dbg(ql_dbg_async, vha, 0x5043,
924 "Async-%s error entry - portid=%02x%02x%02x "
d3fa9e7d
AV
925 "entry-status=%x status=%x state-flag=%x "
926 "status-flags=%x.\n",
7c3df132 927 type, fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d
AV
928 fcport->d_id.b.al_pa, mbx->entry_status,
929 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
7c3df132 930 le16_to_cpu(mbx->status_flags));
d3fa9e7d 931
7c3df132
SK
932 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
933 (uint8_t *)mbx, sizeof(*mbx));
ac280b67 934
99b0bec7 935 goto logio_done;
ac280b67
AV
936 }
937
5ff1d584 938 status = le16_to_cpu(mbx->status);
4916392b 939 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
5ff1d584
AV
940 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
941 status = 0;
942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
7c3df132
SK
943 ql_dbg(ql_dbg_async, vha, 0x5045,
944 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
945 type, fcport->d_id.b.domain, fcport->d_id.b.area,
946 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
ac280b67
AV
947
948 data[0] = MBS_COMMAND_COMPLETE;
4916392b 949 if (ctx->type == SRB_LOGIN_CMD) {
99b0bec7
AV
950 fcport->port_type = FCT_TARGET;
951 if (le16_to_cpu(mbx->mb1) & BIT_0)
952 fcport->port_type = FCT_INITIATOR;
6ac52608 953 else if (le16_to_cpu(mbx->mb1) & BIT_1)
99b0bec7 954 fcport->flags |= FCF_FCP2_DEVICE;
5ff1d584 955 }
99b0bec7 956 goto logio_done;
ac280b67
AV
957 }
958
959 data[0] = le16_to_cpu(mbx->mb0);
960 switch (data[0]) {
961 case MBS_PORT_ID_USED:
962 data[1] = le16_to_cpu(mbx->mb1);
963 break;
964 case MBS_LOOP_ID_USED:
965 break;
966 default:
967 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
968 break;
969 }
970
7c3df132
SK
971 ql_log(ql_log_warn, vha, 0x5046,
972 "Async-%s failed - portid=%02x%02x%02x status=%x "
d3fa9e7d 973 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
7c3df132 974 type, fcport->d_id.b.domain,
d3fa9e7d 975 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
ac280b67
AV
976 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
977 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
7c3df132 978 le16_to_cpu(mbx->mb7));
ac280b67 979
99b0bec7 980logio_done:
4916392b 981 lio->done(sp);
ac280b67
AV
982}
983
9bc4f4fb
HZ
984static void
985qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
986 sts_entry_t *pkt, int iocb_type)
987{
988 const char func[] = "CT_IOCB";
989 const char *type;
990 struct qla_hw_data *ha = vha->hw;
991 srb_t *sp;
992 struct srb_ctx *sp_bsg;
993 struct fc_bsg_job *bsg_job;
994 uint16_t comp_status;
995
996 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
997 if (!sp)
998 return;
999
1000 sp_bsg = sp->ctx;
1001 bsg_job = sp_bsg->u.bsg_job;
1002
1003 type = NULL;
1004 switch (sp_bsg->type) {
1005 case SRB_CT_CMD:
1006 type = "ct pass-through";
1007 break;
1008 default:
7c3df132
SK
1009 ql_log(ql_log_warn, vha, 0x5047,
1010 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
9bc4f4fb
HZ
1011 return;
1012 }
1013
1014 comp_status = le16_to_cpu(pkt->comp_status);
1015
1016 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1017 * fc payload to the caller
1018 */
1019 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1020 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1021
1022 if (comp_status != CS_COMPLETE) {
1023 if (comp_status == CS_DATA_UNDERRUN) {
1024 bsg_job->reply->result = DID_OK << 16;
1025 bsg_job->reply->reply_payload_rcv_len =
1026 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1027
7c3df132
SK
1028 ql_log(ql_log_warn, vha, 0x5048,
1029 "CT pass-through-%s error "
9bc4f4fb 1030 "comp_status-status=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1031 type, comp_status,
1032 bsg_job->reply->reply_payload_rcv_len);
9bc4f4fb 1033 } else {
7c3df132
SK
1034 ql_log(ql_log_warn, vha, 0x5049,
1035 "CT pass-through-%s error "
1036 "comp_status-status=0x%x.\n", type, comp_status);
9bc4f4fb
HZ
1037 bsg_job->reply->result = DID_ERROR << 16;
1038 bsg_job->reply->reply_payload_rcv_len = 0;
1039 }
7c3df132
SK
1040 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
1041 (uint8_t *)pkt, sizeof(*pkt));
9bc4f4fb 1042 } else {
6eab04a8 1043 bsg_job->reply->result = DID_OK << 16;
9bc4f4fb
HZ
1044 bsg_job->reply->reply_payload_rcv_len =
1045 bsg_job->reply_payload.payload_len;
1046 bsg_job->reply_len = 0;
1047 }
1048
1049 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1050 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1051
1052 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1053 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1054
1055 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1056 kfree(sp->fcport);
1057
1058 kfree(sp->ctx);
1059 mempool_free(sp, ha->srb_mempool);
1060 bsg_job->job_done(bsg_job);
1061}
1062
9a069e19
GM
1063static void
1064qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1065 struct sts_entry_24xx *pkt, int iocb_type)
1066{
1067 const char func[] = "ELS_CT_IOCB";
1068 const char *type;
1069 struct qla_hw_data *ha = vha->hw;
1070 srb_t *sp;
4916392b 1071 struct srb_ctx *sp_bsg;
9a069e19
GM
1072 struct fc_bsg_job *bsg_job;
1073 uint16_t comp_status;
1074 uint32_t fw_status[3];
1075 uint8_t* fw_sts_ptr;
1076
1077 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1078 if (!sp)
1079 return;
4916392b
MI
1080 sp_bsg = sp->ctx;
1081 bsg_job = sp_bsg->u.bsg_job;
9a069e19
GM
1082
1083 type = NULL;
4916392b 1084 switch (sp_bsg->type) {
9a069e19
GM
1085 case SRB_ELS_CMD_RPT:
1086 case SRB_ELS_CMD_HST:
1087 type = "els";
1088 break;
1089 case SRB_CT_CMD:
1090 type = "ct pass-through";
1091 break;
1092 default:
7c3df132
SK
1093 ql_log(ql_log_warn, vha, 0x503e,
1094 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
9a069e19
GM
1095 return;
1096 }
1097
1098 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1099 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1100 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1101
1102 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1103 * fc payload to the caller
1104 */
1105 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1106 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1107
1108 if (comp_status != CS_COMPLETE) {
1109 if (comp_status == CS_DATA_UNDERRUN) {
1110 bsg_job->reply->result = DID_OK << 16;
1111 bsg_job->reply->reply_payload_rcv_len =
1112 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1113
7c3df132
SK
1114 ql_log(ql_log_info, vha, 0x503f,
1115 "ELS-CT pass-through-%s error comp_status-status=0x%x "
9a069e19 1116 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
7c3df132
SK
1117 type, comp_status, fw_status[1], fw_status[2],
1118 le16_to_cpu(((struct els_sts_entry_24xx *)
1119 pkt)->total_byte_count));
9a069e19
GM
1120 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1121 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1122 }
1123 else {
7c3df132
SK
1124 ql_log(ql_log_info, vha, 0x5040,
1125 "ELS-CT pass-through-%s error comp_status-status=0x%x "
9a069e19 1126 "error subcode 1=0x%x error subcode 2=0x%x.\n",
7c3df132
SK
1127 type, comp_status,
1128 le16_to_cpu(((struct els_sts_entry_24xx *)
1129 pkt)->error_subcode_1),
1130 le16_to_cpu(((struct els_sts_entry_24xx *)
1131 pkt)->error_subcode_2));
9a069e19
GM
1132 bsg_job->reply->result = DID_ERROR << 16;
1133 bsg_job->reply->reply_payload_rcv_len = 0;
1134 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1135 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1136 }
7c3df132
SK
1137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
1138 (uint8_t *)pkt, sizeof(*pkt));
9a069e19
GM
1139 }
1140 else {
6eab04a8 1141 bsg_job->reply->result = DID_OK << 16;
9a069e19
GM
1142 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1143 bsg_job->reply_len = 0;
1144 }
1145
1146 dma_unmap_sg(&ha->pdev->dev,
1147 bsg_job->request_payload.sg_list,
1148 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1149 dma_unmap_sg(&ha->pdev->dev,
1150 bsg_job->reply_payload.sg_list,
1151 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4916392b
MI
1152 if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1153 (sp_bsg->type == SRB_CT_CMD))
9a069e19
GM
1154 kfree(sp->fcport);
1155 kfree(sp->ctx);
1156 mempool_free(sp, ha->srb_mempool);
1157 bsg_job->job_done(bsg_job);
1158}
1159
ac280b67
AV
1160static void
1161qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1162 struct logio_entry_24xx *logio)
1163{
1164 const char func[] = "LOGIO-IOCB";
1165 const char *type;
ac280b67
AV
1166 fc_port_t *fcport;
1167 srb_t *sp;
4916392b
MI
1168 struct srb_iocb *lio;
1169 struct srb_ctx *ctx;
99b0bec7 1170 uint16_t *data;
ac280b67
AV
1171 uint32_t iop[2];
1172
1173 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1174 if (!sp)
1175 return;
1176
4916392b
MI
1177 ctx = sp->ctx;
1178 lio = ctx->u.iocb_cmd;
1179 type = ctx->name;
ac280b67 1180 fcport = sp->fcport;
4916392b 1181 data = lio->u.logio.data;
ac280b67 1182
5ff1d584 1183 data[0] = MBS_COMMAND_ERROR;
4916392b 1184 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
5ff1d584 1185 QLA_LOGIO_LOGIN_RETRIED : 0;
ac280b67 1186 if (logio->entry_status) {
7c3df132
SK
1187 ql_log(ql_log_warn, vha, 0x5034,
1188 "Async-%s error entry - "
d3fa9e7d 1189 "portid=%02x%02x%02x entry-status=%x.\n",
7c3df132
SK
1190 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1191 fcport->d_id.b.al_pa, logio->entry_status);
1192 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
1193 (uint8_t *)logio, sizeof(*logio));
ac280b67 1194
99b0bec7 1195 goto logio_done;
ac280b67
AV
1196 }
1197
1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
7c3df132
SK
1199 ql_dbg(ql_dbg_async, vha, 0x5036,
1200 "Async-%s complete - portid=%02x%02x%02x "
d3fa9e7d 1201 "iop0=%x.\n",
7c3df132 1202 type, fcport->d_id.b.domain, fcport->d_id.b.area,
d3fa9e7d 1203 fcport->d_id.b.al_pa,
7c3df132 1204 le32_to_cpu(logio->io_parameter[0]));
ac280b67
AV
1205
1206 data[0] = MBS_COMMAND_COMPLETE;
4916392b 1207 if (ctx->type != SRB_LOGIN_CMD)
99b0bec7 1208 goto logio_done;
ac280b67
AV
1209
1210 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1211 if (iop[0] & BIT_4) {
1212 fcport->port_type = FCT_TARGET;
1213 if (iop[0] & BIT_8)
8474f3a0 1214 fcport->flags |= FCF_FCP2_DEVICE;
b0cd579c 1215 } else if (iop[0] & BIT_5)
ac280b67 1216 fcport->port_type = FCT_INITIATOR;
b0cd579c 1217
ac280b67
AV
1218 if (logio->io_parameter[7] || logio->io_parameter[8])
1219 fcport->supported_classes |= FC_COS_CLASS2;
1220 if (logio->io_parameter[9] || logio->io_parameter[10])
1221 fcport->supported_classes |= FC_COS_CLASS3;
1222
99b0bec7 1223 goto logio_done;
ac280b67
AV
1224 }
1225
1226 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1227 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1228 switch (iop[0]) {
1229 case LSC_SCODE_PORTID_USED:
1230 data[0] = MBS_PORT_ID_USED;
1231 data[1] = LSW(iop[1]);
1232 break;
1233 case LSC_SCODE_NPORT_USED:
1234 data[0] = MBS_LOOP_ID_USED;
1235 break;
ac280b67
AV
1236 default:
1237 data[0] = MBS_COMMAND_ERROR;
ac280b67
AV
1238 break;
1239 }
1240
7c3df132
SK
1241 ql_dbg(ql_dbg_async, vha, 0x5037,
1242 "Async-%s failed - portid=%02x%02x%02x comp=%x "
d3fa9e7d 1243 "iop0=%x iop1=%x.\n",
7c3df132 1244 type, fcport->d_id.b.domain,
d3fa9e7d 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa,
ac280b67
AV
1246 le16_to_cpu(logio->comp_status),
1247 le32_to_cpu(logio->io_parameter[0]),
7c3df132 1248 le32_to_cpu(logio->io_parameter[1]));
ac280b67 1249
99b0bec7 1250logio_done:
4916392b 1251 lio->done(sp);
ac280b67
AV
1252}
1253
3822263e
MI
1254static void
1255qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 struct tsk_mgmt_entry *tsk)
1257{
1258 const char func[] = "TMF-IOCB";
1259 const char *type;
1260 fc_port_t *fcport;
1261 srb_t *sp;
1262 struct srb_iocb *iocb;
1263 struct srb_ctx *ctx;
1264 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1265 int error = 1;
1266
1267 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1268 if (!sp)
1269 return;
1270
1271 ctx = sp->ctx;
1272 iocb = ctx->u.iocb_cmd;
1273 type = ctx->name;
1274 fcport = sp->fcport;
1275
1276 if (sts->entry_status) {
7c3df132
SK
1277 ql_log(ql_log_warn, vha, 0x5038,
1278 "Async-%s error - entry-status(%x).\n",
1279 type, sts->entry_status);
3822263e 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
7c3df132
SK
1281 ql_log(ql_log_warn, vha, 0x5039,
1282 "Async-%s error - completion status(%x).\n",
1283 type, sts->comp_status);
3822263e
MI
1284 } else if (!(le16_to_cpu(sts->scsi_status) &
1285 SS_RESPONSE_INFO_LEN_VALID)) {
7c3df132
SK
1286 ql_log(ql_log_warn, vha, 0x503a,
1287 "Async-%s error - no response info(%x).\n",
1288 type, sts->scsi_status);
3822263e 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
7c3df132
SK
1290 ql_log(ql_log_warn, vha, 0x503b,
1291 "Async-%s error - not enough response(%d).\n",
1292 type, sts->rsp_data_len);
3822263e 1293 } else if (sts->data[3]) {
7c3df132
SK
1294 ql_log(ql_log_warn, vha, 0x503c,
1295 "Async-%s error - response(%x).\n",
1296 type, sts->data[3]);
3822263e
MI
1297 } else {
1298 error = 0;
1299 }
1300
1301 if (error) {
1302 iocb->u.tmf.data = error;
7c3df132
SK
1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1304 (uint8_t *)sts, sizeof(*sts));
3822263e
MI
1305 }
1306
1307 iocb->done(sp);
1308}
1309
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
1314void
73208dfd 1315qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 1316{
73208dfd
AC
1317 struct scsi_qla_host *vha;
1318 struct qla_hw_data *ha = rsp->hw;
3d71644c 1319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
1320 sts_entry_t *pkt;
1321 uint16_t handle_cnt;
1322 uint16_t cnt;
73208dfd 1323
2afa19a9 1324 vha = pci_get_drvdata(ha->pdev);
1da177e4 1325
e315cd28 1326 if (!vha->flags.online)
1da177e4
LT
1327 return;
1328
e315cd28
AC
1329 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1330 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 1331
e315cd28
AC
1332 rsp->ring_index++;
1333 if (rsp->ring_index == rsp->length) {
1334 rsp->ring_index = 0;
1335 rsp->ring_ptr = rsp->ring;
1da177e4 1336 } else {
e315cd28 1337 rsp->ring_ptr++;
1da177e4
LT
1338 }
1339
1340 if (pkt->entry_status != 0) {
7c3df132
SK
1341 ql_log(ql_log_warn, vha, 0x5035,
1342 "Process error entry.\n");
1da177e4 1343
73208dfd 1344 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
1345 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1346 wmb();
1347 continue;
1348 }
1349
1350 switch (pkt->entry_type) {
1351 case STATUS_TYPE:
73208dfd 1352 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
1353 break;
1354 case STATUS_TYPE_21:
1355 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1356 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1357 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1358 ((sts21_entry_t *)pkt)->handle[cnt]);
1359 }
1360 break;
1361 case STATUS_TYPE_22:
1362 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1363 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 1364 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
1365 ((sts22_entry_t *)pkt)->handle[cnt]);
1366 }
1367 break;
1368 case STATUS_CONT_TYPE:
2afa19a9 1369 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1da177e4 1370 break;
ac280b67
AV
1371 case MBX_IOCB_TYPE:
1372 qla2x00_mbx_iocb_entry(vha, rsp->req,
1373 (struct mbx_entry *)pkt);
3822263e 1374 break;
9bc4f4fb
HZ
1375 case CT_IOCB_TYPE:
1376 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1377 break;
1da177e4
LT
1378 default:
1379 /* Type Not Supported. */
7c3df132
SK
1380 ql_log(ql_log_warn, vha, 0x504a,
1381 "Received unknown response pkt type %x "
1da177e4 1382 "entry status=%x.\n",
7c3df132 1383 pkt->entry_type, pkt->entry_status);
1da177e4
LT
1384 break;
1385 }
1386 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1387 wmb();
1388 }
1389
1390 /* Adjust ring index */
e315cd28 1391 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
1392}
1393
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp)
4733fcb1 1398{
7c3df132 1399 struct scsi_qla_host *vha = sp->fcport->vha;
4733fcb1
AV
1400 struct scsi_cmnd *cp = sp->cmd;
1401
1402 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1403 sense_len = SCSI_SENSE_BUFFERSIZE;
1404
4733fcb1
AV
1405 sp->request_sense_length = sense_len;
1406 sp->request_sense_ptr = cp->sense_buffer;
5544213b
AV
1407 if (sp->request_sense_length > par_sense_len)
1408 sense_len = par_sense_len;
4733fcb1
AV
1409
1410 memcpy(cp->sense_buffer, sense_data, sense_len);
1411
1412 sp->request_sense_ptr += sense_len;
1413 sp->request_sense_length -= sense_len;
1414 if (sp->request_sense_length != 0)
2afa19a9 1415 rsp->status_srb = sp;
4733fcb1 1416
7c3df132
SK
1417 ql_dbg(ql_dbg_io, vha, 0x301c,
1418 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
1419 sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
1420 cp->device->lun, cp);
4733fcb1 1421 if (sense_len)
7c3df132
SK
1422 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1423 cp->sense_buffer, sense_len);
4733fcb1
AV
1424}
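
/*
 * When the sense data does not fit in the status IOCB, qla2x00_handle_sense()
 * leaves the srb in rsp->status_srb so the remaining bytes can be copied from
 * the STATUS_CONT_TYPE entries by qla2x00_status_cont_entry().
 */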
1425
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

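/*
 * Illustrative sketch only (not used by the driver): the status IOCB carries
 * the actual 8-byte DIF tuple at sts24->data[12] and the expected tuple at
 * sts24->data[20].  After the byte swapping done in qla2x00_status_entry(),
 * qla2x00_handle_dif_error() below reads each tuple as:
 *
 *	app_tag = le16_to_cpu(*(uint16_t *)(p + 0));
 *	guard   = le16_to_cpu(*(uint16_t *)(p + 2));
 *	ref_tag = le32_to_cpu(*(uint32_t *)(p + 4));
 */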
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.  In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected an error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = sp->cmd;
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				qla_printk(KERN_WARNING, sp->fcport->vha->hw,
				    "unexpected tag values tag:lba=%x:%llx\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
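
/*
 * Return convention above: 0 is returned only when the reported sector is
 * intentionally ignored (application tag of 0xffff, plus an all-ones
 * reference tag for non-type-3 protection) and the command has already been
 * completed with DID_OK and an adjusted residual; every other path returns
 * nonzero with the check-condition sense data and result already set up for
 * the caller.
 */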
1559
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
1565static void
73208dfd 1566qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 1567{
1da177e4 1568 srb_t *sp;
1da177e4
LT
1569 fc_port_t *fcport;
1570 struct scsi_cmnd *cp;
9a853f71
AV
1571 sts_entry_t *sts;
1572 struct sts_entry_24xx *sts24;
1da177e4
LT
1573 uint16_t comp_status;
1574 uint16_t scsi_status;
b7d2280c 1575 uint16_t ox_id;
1da177e4
LT
1576 uint8_t lscsi_status;
1577 int32_t resid;
5544213b
AV
1578 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1579 fw_resid_len;
9a853f71 1580 uint8_t *rsp_info, *sense_data;
e315cd28 1581 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1582 uint32_t handle;
1583 uint16_t que;
1584 struct req_que *req;
b7d2280c 1585 int logit = 1;
9a853f71
AV
1586
1587 sts = (sts_entry_t *) pkt;
1588 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 1589 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1590 comp_status = le16_to_cpu(sts24->comp_status);
1591 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1592 } else {
1593 comp_status = le16_to_cpu(sts->comp_status);
1594 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1595 }
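	/*
	 * The completion handle packs the request-queue number in its upper
	 * 16 bits and the outstanding-command index in the lower 16 bits;
	 * that is how the originating request queue is looked up below.
	 */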
2afa19a9
AC
1596 handle = (uint32_t) LSW(sts->handle);
1597 que = MSW(sts->handle);
1598 req = ha->req_q_map[que];
a9083016 1599
1da177e4 1600 /* Fast path completion. */
9a853f71 1601 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2afa19a9 1602 qla2x00_process_completed_request(vha, req, handle);
1da177e4
LT
1603
1604 return;
1605 }
1606
1607 /* Validate handle. */
2afa19a9
AC
1608 if (handle < MAX_OUTSTANDING_COMMANDS) {
1609 sp = req->outstanding_cmds[handle];
1610 req->outstanding_cmds[handle] = NULL;
1da177e4
LT
1611 } else
1612 sp = NULL;
1613
1614 if (sp == NULL) {
7c3df132
SK
1615 ql_log(ql_log_warn, vha, 0x3017,
1616 "Invalid status handle (0x%x).\n", sts->handle);
1da177e4 1617
8f7daead
GM
1618 if (IS_QLA82XX(ha))
1619 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1620 else
1621 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1622 qla2xxx_wake_dpc(vha);
1da177e4
LT
1623 return;
1624 }
1625 cp = sp->cmd;
1626 if (cp == NULL) {
7c3df132
SK
1627 ql_log(ql_log_warn, vha, 0x3018,
1628 "Command already returned (0x%x/%p).\n",
1629 sts->handle, sp);
1da177e4
LT
1630
1631 return;
1632 }
1633
9a853f71 1634 lscsi_status = scsi_status & STATUS_MASK;
1da177e4 1635
bdf79621 1636 fcport = sp->fcport;
1da177e4 1637
b7d2280c 1638 ox_id = 0;
5544213b
AV
1639 sense_len = par_sense_len = rsp_info_len = resid_len =
1640 fw_resid_len = 0;
e428924c 1641 if (IS_FWI2_CAPABLE(ha)) {
0f00a206
LC
1642 if (scsi_status & SS_SENSE_LEN_VALID)
1643 sense_len = le32_to_cpu(sts24->sense_len);
1644 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1645 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1646 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1647 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1648 if (comp_status == CS_DATA_UNDERRUN)
1649 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1650 rsp_info = sts24->data;
1651 sense_data = sts24->data;
1652 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
b7d2280c 1653 ox_id = le16_to_cpu(sts24->ox_id);
5544213b 1654 par_sense_len = sizeof(sts24->data);
9a853f71 1655 } else {
0f00a206
LC
1656 if (scsi_status & SS_SENSE_LEN_VALID)
1657 sense_len = le16_to_cpu(sts->req_sense_length);
1658 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1659 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
9a853f71
AV
1660 resid_len = le32_to_cpu(sts->residual_length);
1661 rsp_info = sts->rsp_info;
1662 sense_data = sts->req_sense_data;
5544213b 1663 par_sense_len = sizeof(sts->req_sense_data);
9a853f71
AV
1664 }
1665
1da177e4
LT
1666 /* Check for any FCP transport errors. */
1667 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1668 /* Sense data lies beyond any FCP RESPONSE data. */
5544213b 1669 if (IS_FWI2_CAPABLE(ha)) {
9a853f71 1670 sense_data += rsp_info_len;
5544213b
AV
1671 par_sense_len -= rsp_info_len;
1672 }
9a853f71 1673 if (rsp_info_len > 3 && rsp_info[3]) {
7c3df132
SK
1674 ql_log(ql_log_warn, vha, 0x3019,
1675 "FCP I/O protocol failure (0x%x/0x%x).\n",
1676 rsp_info_len, rsp_info[3]);
1da177e4
LT
1677
1678 cp->result = DID_BUS_BUSY << 16;
b7d2280c 1679 goto out;
1da177e4
LT
1680 }
1681 }
1682
3e8ce320
AV
1683 /* Check for overrun. */
1684 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1685 scsi_status & SS_RESIDUAL_OVER)
1686 comp_status = CS_DATA_OVERRUN;
1687
1da177e4
LT
1688 /*
1689 * Based on the host and SCSI status, generate the status code for Linux.
1690 */
1691 switch (comp_status) {
1692 case CS_COMPLETE:
df7baa50 1693 case CS_QUEUE_FULL:
1da177e4
LT
1694 if (scsi_status == 0) {
1695 cp->result = DID_OK << 16;
1696 break;
1697 }
1698 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 1699 resid = resid_len;
385d70b4 1700 scsi_set_resid(cp, resid);
0da69df1
AV
1701
1702 if (!lscsi_status &&
385d70b4 1703 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1 1704 cp->underflow)) {
7c3df132
SK
1705 ql_log(ql_log_warn, vha, 0x301a,
1706 "Mid-layer underflow "
b7d2280c 1707 "detected (0x%x of 0x%x bytes).\n",
7c3df132 1708 resid, scsi_bufflen(cp));
0da69df1
AV
1709
1710 cp->result = DID_ERROR << 16;
1711 break;
1712 }
1da177e4 1713 }
1da177e4
LT
1714 cp->result = DID_OK << 16 | lscsi_status;
1715
df7baa50 1716 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
7c3df132
SK
1717 ql_log(ql_log_warn, vha, 0x301b,
1718 "QUEUE FULL detected.\n");
df7baa50
AV
1719 break;
1720 }
b7d2280c 1721 logit = 0;
1da177e4
LT
1722 if (lscsi_status != SS_CHECK_CONDITION)
1723 break;
1724
b80ca4f7 1725 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1726 if (!(scsi_status & SS_SENSE_LEN_VALID))
1727 break;
1728
5544213b
AV
1729 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1730 rsp);
1da177e4
LT
1731 break;
1732
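 /*
  * The residual is only trusted when the firmware-reported residual
  * agrees with the FCP residual; a mismatch, or an underrun completion
  * without SS_RESIDUAL_UNDER, is treated as dropped frames.
  */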
1733 case CS_DATA_UNDERRUN:
ed17c71b 1734 /* Use F/W calculated residual length. */
0f00a206
LC
1735 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1736 scsi_set_resid(cp, resid);
1737 if (scsi_status & SS_RESIDUAL_UNDER) {
1738 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
7c3df132
SK
1739 ql_log(ql_log_warn, vha, 0x301d,
1740 "Dropped frame(s) detected "
1741 "(0x%x of 0x%x bytes).\n",
1742 resid, scsi_bufflen(cp));
0f00a206
LC
1743
1744 cp->result = DID_ERROR << 16 | lscsi_status;
1745 break;
6acf8190 1746 }
ed17c71b 1747
0f00a206
LC
1748 if (!lscsi_status &&
1749 ((unsigned)(scsi_bufflen(cp) - resid) <
1750 cp->underflow)) {
7c3df132
SK
1751 ql_log(ql_log_warn, vha, 0x301e,
1752 "Mid-layer underflow "
b7d2280c 1753 "detected (0x%x of 0x%x bytes).\n",
7c3df132 1754 resid, scsi_bufflen(cp));
e038a1be 1755
0f00a206
LC
1756 cp->result = DID_ERROR << 16;
1757 break;
1758 }
0374f55e 1759 } else {
7c3df132
SK
1760 ql_log(ql_log_warn, vha, 0x301f,
1761 "Dropped frame(s) detected (0x%x "
1762 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
0f00a206 1763
0374f55e
LC
1764 cp->result = DID_ERROR << 16 | lscsi_status;
1765 goto check_scsi_status;
1da177e4
LT
1766 }
1767
0f00a206 1768 cp->result = DID_OK << 16 | lscsi_status;
b7d2280c 1769 logit = 0;
0f00a206 1770
0374f55e 1771check_scsi_status:
1da177e4 1772 /*
fa2a1ce5 1773 * Check to see if the SCSI Status is non-zero. If so, report the SCSI
1da177e4
LT
1774 * Status.
1775 */
1776 if (lscsi_status != 0) {
ffec28a3 1777 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
7c3df132
SK
1778 ql_log(ql_log_warn, vha, 0x3020,
1779 "QUEUE FULL detected.\n");
b7d2280c 1780 logit = 1;
ffec28a3
AV
1781 break;
1782 }
1da177e4
LT
1783 if (lscsi_status != SS_CHECK_CONDITION)
1784 break;
1785
b80ca4f7 1786 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1787 if (!(scsi_status & SS_SENSE_LEN_VALID))
1788 break;
1789
5544213b
AV
1790 qla2x00_handle_sense(sp, sense_data, par_sense_len,
1791 sense_len, rsp);
1da177e4
LT
1792 }
1793 break;
1794
1da177e4
LT
1795 case CS_PORT_LOGGED_OUT:
1796 case CS_PORT_CONFIG_CHG:
1797 case CS_PORT_BUSY:
1798 case CS_INCOMPLETE:
1799 case CS_PORT_UNAVAILABLE:
b7d2280c 1800 case CS_TIMEOUT:
ff454b01
CD
1801 case CS_RESET:
1802
056a4483
MC
1803 /*
1804 * We are going to have the fc class block the rport
1805 * while we try to recover, so instruct the mid layer
1806 * to requeue until the class decides how to handle this.
1807 */
1808 cp->result = DID_TRANSPORT_DISRUPTED << 16;
b7d2280c
AV
1809
1810 if (comp_status == CS_TIMEOUT) {
1811 if (IS_FWI2_CAPABLE(ha))
1812 break;
1813 else if ((le16_to_cpu(sts->status_flags) &
1814 SF_LOGOUT_SENT) == 0)
1815 break;
1816 }
1817
7c3df132
SK
1818 ql_dbg(ql_dbg_io, vha, 0x3021,
1819 "Port down status: port-state=0x%x.\n",
1820 atomic_read(&fcport->state));
b7d2280c 1821
a7a28504 1822 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 1823 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1824 break;
1825
1da177e4 1826 case CS_ABORTED:
1da177e4
LT
1827 cp->result = DID_RESET << 16;
1828 break;
bad75002
AE
1829
1830 case CS_DIF_ERROR:
8cb2049c 1831 logit = qla2x00_handle_dif_error(sp, sts24);
bad75002 1832 break;
1da177e4 1833 default:
1da177e4
LT
1834 cp->result = DID_ERROR << 16;
1835 break;
1836 }
1837
b7d2280c
AV
1838out:
1839 if (logit)
7c3df132
SK
1840 ql_dbg(ql_dbg_io, vha, 0x3022,
1841 "FCP command status: 0x%x-0x%x (0x%x) "
1842 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1843 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1844 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
1845 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1846 resid_len, fw_resid_len);
b7d2280c 1847
2afa19a9 1848 if (rsp->status_srb == NULL)
73208dfd 1849 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1850}
1851
1852/**
1853 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1854 * @rsp: response queue
1855 * @pkt: Entry pointer
1856 *
1857 * Extended sense data.
1858 */
1859static void
2afa19a9 1860qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1da177e4
LT
1861{
1862 uint8_t sense_sz = 0;
2afa19a9 1863 struct qla_hw_data *ha = rsp->hw;
7c3df132 1864 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2afa19a9 1865 srb_t *sp = rsp->status_srb;
1da177e4
LT
1866 struct scsi_cmnd *cp;
1867
1868 if (sp != NULL && sp->request_sense_length != 0) {
1869 cp = sp->cmd;
1870 if (cp == NULL) {
7c3df132
SK
1871 ql_log(ql_log_warn, vha, 0x3025,
1872 "cmd is NULL: already returned to OS (sp=%p).\n",
fa2a1ce5 1873 sp);
1da177e4 1874
2afa19a9 1875 rsp->status_srb = NULL;
1da177e4
LT
1876 return;
1877 }
1878
1879 if (sp->request_sense_length > sizeof(pkt->data)) {
1880 sense_sz = sizeof(pkt->data);
1881 } else {
1882 sense_sz = sp->request_sense_length;
1883 }
1884
1885 /* Move sense data. */
e428924c 1886 if (IS_FWI2_CAPABLE(ha))
9a853f71 1887 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1da177e4 1888 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
7c3df132
SK
1889 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1890 sp->request_sense_ptr, sense_sz);
1da177e4
LT
1891
1892 sp->request_sense_ptr += sense_sz;
1893 sp->request_sense_length -= sense_sz;
1894
1895 /* Place command on done queue. */
1896 if (sp->request_sense_length == 0) {
2afa19a9 1897 rsp->status_srb = NULL;
73208dfd 1898 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1899 }
1900 }
1901}
1902
1903/**
1904 * qla2x00_error_entry() - Process an error entry.
1905 * @vha: SCSI driver HA context
 * @rsp: response queue
1906 * @pkt: Entry pointer
1907 */
1908static void
73208dfd 1909qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
1910{
1911 srb_t *sp;
e315cd28 1912 struct qla_hw_data *ha = vha->hw;
2afa19a9
AC
1913 uint32_t handle = LSW(pkt->handle);
1914 uint16_t que = MSW(pkt->handle);
1915 struct req_que *req = ha->req_q_map[que];
7c3df132 1916
1da177e4 1917 if (pkt->entry_status & RF_INV_E_ORDER)
7c3df132
SK
1918 ql_dbg(ql_dbg_async, vha, 0x502a,
1919 "Invalid Entry Order.\n");
1da177e4 1920 else if (pkt->entry_status & RF_INV_E_COUNT)
7c3df132
SK
1921 ql_dbg(ql_dbg_async, vha, 0x502b,
1922 "Invalid Entry Count.\n");
1da177e4 1923 else if (pkt->entry_status & RF_INV_E_PARAM)
7c3df132
SK
1924 ql_dbg(ql_dbg_async, vha, 0x502c,
1925 "Invalid Entry Parameter.\n");
1da177e4 1926 else if (pkt->entry_status & RF_INV_E_TYPE)
7c3df132
SK
1927 ql_dbg(ql_dbg_async, vha, 0x502d,
1928 "Invalid Entry Type.\n");
1da177e4 1929 else if (pkt->entry_status & RF_BUSY)
7c3df132
SK
1930 ql_dbg(ql_dbg_async, vha, 0x502e,
1931 "Busy.\n");
1da177e4 1932 else
7c3df132
SK
1933 ql_dbg(ql_dbg_async, vha, 0x502f,
1934 "UNKNOWN flag error.\n");
1da177e4
LT
1935
1936 /* Validate handle. */
2afa19a9
AC
1937 if (handle < MAX_OUTSTANDING_COMMANDS)
1938 sp = req->outstanding_cmds[handle];
1da177e4
LT
1939 else
1940 sp = NULL;
1941
1942 if (sp) {
1943 /* Free outstanding command slot. */
2afa19a9 1944 req->outstanding_cmds[handle] = NULL;
354d6b21 1945
1da177e4
LT
1946 /* Bad payload or header */
1947 if (pkt->entry_status &
1948 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1949 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1950 sp->cmd->result = DID_ERROR << 16;
1951 } else if (pkt->entry_status & RF_BUSY) {
1952 sp->cmd->result = DID_BUS_BUSY << 16;
1953 } else {
1954 sp->cmd->result = DID_ERROR << 16;
1955 }
73208dfd 1956 qla2x00_sp_compl(ha, sp);
1da177e4 1957
9a853f71 1958 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
8f7daead
GM
1959 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1960 || pkt->entry_type == COMMAND_TYPE_6) {
7c3df132
SK
1961 ql_log(ql_log_warn, vha, 0x5030,
1962 "Error entry - invalid handle.\n");
1da177e4 1963
8f7daead
GM
1964 if (IS_QLA82XX(ha))
1965 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1966 else
1967 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
e315cd28 1968 qla2xxx_wake_dpc(vha);
1da177e4
LT
1969 }
1970}
1971
9a853f71
AV
1972/**
1973 * qla24xx_mbx_completion() - Process mailbox command completions.
1974 * @vha: SCSI driver HA context
1975 * @mb0: Mailbox0 register
1976 */
1977static void
e315cd28 1978qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
1979{
1980 uint16_t cnt;
1981 uint16_t __iomem *wptr;
e315cd28 1982 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
1983 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1984
1985 /* Load return mailbox registers. */
1986 ha->flags.mbox_int = 1;
1987 ha->mailbox_out[0] = mb0;
1988 wptr = (uint16_t __iomem *)&reg->mailbox1;
1989
1990 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1991 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1992 wptr++;
1993 }
1994
1995 if (ha->mcp) {
7c3df132
SK
1996 ql_dbg(ql_dbg_async, vha, 0x504d,
1997 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
9a853f71 1998 } else {
7c3df132
SK
1999 ql_dbg(ql_dbg_async, vha, 0x504e,
2000 "MBX pointer ERROR.\n");
9a853f71
AV
2001 }
2002}
2003
2004/**
2005 * qla24xx_process_response_queue() - Process response queue entries.
2006 * @vha: SCSI driver HA context
 * @rsp: response queue
2007 */
2afa19a9
AC
2008void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2009 struct rsp_que *rsp)
9a853f71 2010{
9a853f71 2011 struct sts_entry_24xx *pkt;
a9083016 2012 struct qla_hw_data *ha = vha->hw;
9a853f71 2013
e315cd28 2014 if (!vha->flags.online)
9a853f71
AV
2015 return;
2016
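 /*
  * Consume ring entries until one still stamped RESPONSE_PROCESSED is
  * found; each handled entry is re-stamped below so it is not
  * processed again on a later pass.
  */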
e315cd28
AC
2017 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2018 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 2019
e315cd28
AC
2020 rsp->ring_index++;
2021 if (rsp->ring_index == rsp->length) {
2022 rsp->ring_index = 0;
2023 rsp->ring_ptr = rsp->ring;
9a853f71 2024 } else {
e315cd28 2025 rsp->ring_ptr++;
9a853f71
AV
2026 }
2027
2028 if (pkt->entry_status != 0) {
7c3df132
SK
2029 ql_dbg(ql_dbg_async, vha, 0x5029,
2030 "Process error entry.\n");
9a853f71 2031
73208dfd 2032 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
9a853f71
AV
2033 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2034 wmb();
2035 continue;
2036 }
2037
2038 switch (pkt->entry_type) {
2039 case STATUS_TYPE:
73208dfd 2040 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
2041 break;
2042 case STATUS_CONT_TYPE:
2afa19a9 2043 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
9a853f71 2044 break;
2c3dfe3f 2045 case VP_RPT_ID_IOCB_TYPE:
e315cd28 2046 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
2047 (struct vp_rpt_id_entry_24xx *)pkt);
2048 break;
ac280b67
AV
2049 case LOGINOUT_PORT_IOCB_TYPE:
2050 qla24xx_logio_entry(vha, rsp->req,
2051 (struct logio_entry_24xx *)pkt);
2052 break;
3822263e
MI
2053 case TSK_MGMT_IOCB_TYPE:
2054 qla24xx_tm_iocb_entry(vha, rsp->req,
2055 (struct tsk_mgmt_entry *)pkt);
2056 break;
9a069e19
GM
2057 case CT_IOCB_TYPE:
2058 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2059 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
2060 break;
2061 case ELS_IOCB_TYPE:
2062 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2063 break;
9a853f71
AV
2064 default:
2065 /* Type Not Supported. */
7c3df132
SK
2066 ql_dbg(ql_dbg_async, vha, 0x5042,
2067 "Received unknown response pkt type %x "
9a853f71 2068 "entry status=%x.\n",
7c3df132 2069 pkt->entry_type, pkt->entry_status);
9a853f71
AV
2070 break;
2071 }
2072 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2073 wmb();
2074 }
2075
2076 /* Adjust ring index */
a9083016
GM
2077 if (IS_QLA82XX(ha)) {
2078 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2079 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2080 } else
2081 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
9a853f71
AV
2082}
2083
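/*
 * Diagnostic helper for the RISC-paused paths below (ISP25xx/ISP81xx
 * only): selects register window 0x7C00, polls BIT_0 of the window
 * register for access, and logs an additional code when BIT_3 of the
 * 0xC8 window register is set.
 */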
05236a05 2084static void
e315cd28 2085qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
2086{
2087 int rval;
2088 uint32_t cnt;
e315cd28 2089 struct qla_hw_data *ha = vha->hw;
05236a05
AV
2090 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2091
3a03eb79 2092 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
05236a05
AV
2093 return;
2094
2095 rval = QLA_SUCCESS;
2096 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2097 RD_REG_DWORD(&reg->iobase_addr);
2098 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2099 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2100 rval == QLA_SUCCESS; cnt--) {
2101 if (cnt) {
2102 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2103 udelay(10);
2104 } else
2105 rval = QLA_FUNCTION_TIMEOUT;
2106 }
2107 if (rval == QLA_SUCCESS)
2108 goto next_test;
2109
2110 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2111 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2112 rval == QLA_SUCCESS; cnt--) {
2113 if (cnt) {
2114 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2115 udelay(10);
2116 } else
2117 rval = QLA_FUNCTION_TIMEOUT;
2118 }
2119 if (rval != QLA_SUCCESS)
2120 goto done;
2121
2122next_test:
2123 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
7c3df132
SK
2124 ql_log(ql_log_info, vha, 0x504c,
2125 "Additional code -- 0x55AA.\n");
05236a05
AV
2126
2127done:
2128 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2129 RD_REG_DWORD(&reg->iobase_window);
2130}
2131
9a853f71
AV
2132/**
2133 * qla24xx_intr_handler() - Process interrupts for the ISP24xx and ISP25xx.
2134 * @irq:
2135 * @dev_id: SCSI driver HA context
9a853f71
AV
2136 *
2137 * Called by system whenever the host adapter generates an interrupt.
2138 *
2139 * Returns handled flag.
2140 */
2141irqreturn_t
7d12e780 2142qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 2143{
e315cd28
AC
2144 scsi_qla_host_t *vha;
2145 struct qla_hw_data *ha;
9a853f71
AV
2146 struct device_reg_24xx __iomem *reg;
2147 int status;
9a853f71
AV
2148 unsigned long iter;
2149 uint32_t stat;
2150 uint32_t hccr;
2151 uint16_t mb[4];
e315cd28 2152 struct rsp_que *rsp;
43fac4d9 2153 unsigned long flags;
9a853f71 2154
e315cd28
AC
2155 rsp = (struct rsp_que *) dev_id;
2156 if (!rsp) {
9a853f71 2157 printk(KERN_INFO
7c3df132 2158 "%s(): NULL response queue pointer.\n", __func__);
9a853f71
AV
2159 return IRQ_NONE;
2160 }
2161
e315cd28 2162 ha = rsp->hw;
9a853f71
AV
2163 reg = &ha->iobase->isp24;
2164 status = 0;
2165
85880801
AV
2166 if (unlikely(pci_channel_offline(ha->pdev)))
2167 return IRQ_HANDLED;
2168
43fac4d9 2169 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2170 vha = pci_get_drvdata(ha->pdev);
9a853f71
AV
2171 for (iter = 50; iter--; ) {
2172 stat = RD_REG_DWORD(&reg->host_status);
2173 if (stat & HSRX_RISC_PAUSED) {
85880801 2174 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2175 break;
2176
9a853f71
AV
2177 hccr = RD_REG_DWORD(&reg->hccr);
2178
7c3df132
SK
2179 ql_log(ql_log_warn, vha, 0x504b,
2180 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2181 hccr);
05236a05 2182
e315cd28 2183 qla2xxx_check_risc_status(vha);
05236a05 2184
e315cd28
AC
2185 ha->isp_ops->fw_dump(vha, 1);
2186 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
2187 break;
2188 } else if ((stat & HSRX_RISC_INT) == 0)
2189 break;
2190
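 /*
  * The low byte of host_status identifies the interrupt source:
  * 0x1/0x2/0x10/0x11 mailbox completion, 0x12 asynchronous event,
  * 0x13/0x14 response queue update.
  */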
2191 switch (stat & 0xff) {
2192 case 0x1:
2193 case 0x2:
2194 case 0x10:
2195 case 0x11:
e315cd28 2196 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
2197 status |= MBX_INTERRUPT;
2198
2199 break;
2200 case 0x12:
2201 mb[0] = MSW(stat);
2202 mb[1] = RD_REG_WORD(&reg->mailbox1);
2203 mb[2] = RD_REG_WORD(&reg->mailbox2);
2204 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2205 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
2206 break;
2207 case 0x13:
73208dfd 2208 case 0x14:
2afa19a9 2209 qla24xx_process_response_queue(vha, rsp);
9a853f71
AV
2210 break;
2211 default:
7c3df132
SK
2212 ql_dbg(ql_dbg_async, vha, 0x504f,
2213 "Unrecognized interrupt type (%d).\n", stat * 0xff);
9a853f71
AV
2214 break;
2215 }
2216 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2217 RD_REG_DWORD_RELAXED(&reg->hccr);
2218 }
43fac4d9 2219 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a853f71
AV
2220
2221 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2222 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 2223 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2224 complete(&ha->mbx_intr_comp);
9a853f71
AV
2225 }
2226
2227 return IRQ_HANDLED;
2228}
2229
a8488abe
AV
2230static irqreturn_t
2231qla24xx_msix_rsp_q(int irq, void *dev_id)
2232{
e315cd28
AC
2233 struct qla_hw_data *ha;
2234 struct rsp_que *rsp;
a8488abe 2235 struct device_reg_24xx __iomem *reg;
2afa19a9 2236 struct scsi_qla_host *vha;
0f19bc68 2237 unsigned long flags;
a8488abe 2238
e315cd28
AC
2239 rsp = (struct rsp_que *) dev_id;
2240 if (!rsp) {
2241 printk(KERN_INFO
7c3df132 2242 "%s(): NULL response queue pointer.\n", __func__);
e315cd28
AC
2243 return IRQ_NONE;
2244 }
2245 ha = rsp->hw;
a8488abe
AV
2246 reg = &ha->iobase->isp24;
2247
0f19bc68 2248 spin_lock_irqsave(&ha->hardware_lock, flags);
a8488abe 2249
a67093d4 2250 vha = pci_get_drvdata(ha->pdev);
2afa19a9 2251 qla24xx_process_response_queue(vha, rsp);
3155754a 2252 if (!ha->flags.disable_msix_handshake) {
eb94114b
AC
2253 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2254 RD_REG_DWORD_RELAXED(&reg->hccr);
2255 }
0f19bc68 2256 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2257
2258 return IRQ_HANDLED;
2259}
2260
68ca949c
AC
2261static irqreturn_t
2262qla25xx_msix_rsp_q(int irq, void *dev_id)
2263{
2264 struct qla_hw_data *ha;
2265 struct rsp_que *rsp;
3155754a 2266 struct device_reg_24xx __iomem *reg;
0f19bc68 2267 unsigned long flags;
68ca949c
AC
2268
2269 rsp = (struct rsp_que *) dev_id;
2270 if (!rsp) {
2271 printk(KERN_INFO
7c3df132 2272 "%s(): NULL response queue pointer.\n", __func__);
68ca949c
AC
2273 return IRQ_NONE;
2274 }
2275 ha = rsp->hw;
2276
3155754a
AC
2277 /* Clear the interrupt, if enabled, for this response queue */
2278 if (rsp->options & ~BIT_6) {
2279 reg = &ha->iobase->isp24;
0f19bc68 2280 spin_lock_irqsave(&ha->hardware_lock, flags);
3155754a
AC
2281 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2282 RD_REG_DWORD_RELAXED(&reg->hccr);
0f19bc68 2283 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3155754a 2284 }
68ca949c
AC
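 /*
  * Defer ring processing to the per-queue work item, scheduled on
  * CPU (rsp->id - 1).
  */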
2285 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2286
2287 return IRQ_HANDLED;
2288}
2289
a8488abe
AV
2290static irqreturn_t
2291qla24xx_msix_default(int irq, void *dev_id)
2292{
e315cd28
AC
2293 scsi_qla_host_t *vha;
2294 struct qla_hw_data *ha;
2295 struct rsp_que *rsp;
a8488abe
AV
2296 struct device_reg_24xx __iomem *reg;
2297 int status;
a8488abe
AV
2298 uint32_t stat;
2299 uint32_t hccr;
2300 uint16_t mb[4];
0f19bc68 2301 unsigned long flags;
a8488abe 2302
e315cd28
AC
2303 rsp = (struct rsp_que *) dev_id;
2304 if (!rsp) {
7c3df132
SK
2305 printk(KERN_INFO
2306 "%s(): NULL response queue pointer.\n", __func__);
e315cd28
AC
2307 return IRQ_NONE;
2308 }
2309 ha = rsp->hw;
a8488abe
AV
2310 reg = &ha->iobase->isp24;
2311 status = 0;
2312
0f19bc68 2313 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 2314 vha = pci_get_drvdata(ha->pdev);
87f27015 2315 do {
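 /*
  * Unlike the INTx handler above, which loops up to 50 times, only a
  * single pass over host_status is made per MSI-X invocation.
  */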
a8488abe
AV
2316 stat = RD_REG_DWORD(&reg->host_status);
2317 if (stat & HSRX_RISC_PAUSED) {
85880801 2318 if (unlikely(pci_channel_offline(ha->pdev)))
14e660e6
SJ
2319 break;
2320
a8488abe
AV
2321 hccr = RD_REG_DWORD(&reg->hccr);
2322
7c3df132
SK
2323 ql_log(ql_log_info, vha, 0x5050,
2324 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2325 hccr);
05236a05 2326
e315cd28 2327 qla2xxx_check_risc_status(vha);
05236a05 2328
e315cd28
AC
2329 ha->isp_ops->fw_dump(vha, 1);
2330 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
2331 break;
2332 } else if ((stat & HSRX_RISC_INT) == 0)
2333 break;
2334
2335 switch (stat & 0xff) {
2336 case 0x1:
2337 case 0x2:
2338 case 0x10:
2339 case 0x11:
e315cd28 2340 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
2341 status |= MBX_INTERRUPT;
2342
2343 break;
2344 case 0x12:
2345 mb[0] = MSW(stat);
2346 mb[1] = RD_REG_WORD(&reg->mailbox1);
2347 mb[2] = RD_REG_WORD(&reg->mailbox2);
2348 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 2349 qla2x00_async_event(vha, rsp, mb);
a8488abe
AV
2350 break;
2351 case 0x13:
73208dfd 2352 case 0x14:
2afa19a9 2353 qla24xx_process_response_queue(vha, rsp);
a8488abe
AV
2354 break;
2355 default:
7c3df132
SK
2356 ql_dbg(ql_dbg_async, vha, 0x5051,
2357 "Unrecognized interrupt type (%d).\n", stat & 0xff);
a8488abe
AV
2358 break;
2359 }
2360 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 2361 } while (0);
0f19bc68 2362 spin_unlock_irqrestore(&ha->hardware_lock, flags);
a8488abe
AV
2363
2364 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2365 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 2366 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 2367 complete(&ha->mbx_intr_comp);
a8488abe 2368 }
a8488abe
AV
2369 return IRQ_HANDLED;
2370}
2371
2372/* Interrupt handling helpers. */
2373
2374struct qla_init_msix_entry {
a8488abe 2375 const char *name;
476834c2 2376 irq_handler_t handler;
a8488abe
AV
2377};
2378
68ca949c 2379static struct qla_init_msix_entry msix_entries[3] = {
2afa19a9
AC
2380 { "qla2xxx (default)", qla24xx_msix_default },
2381 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
68ca949c 2382 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
a8488abe
AV
2383};
2384
a9083016
GM
2385static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2386 { "qla2xxx (default)", qla82xx_msix_default },
2387 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2388};
2389
a8488abe 2390static void
e315cd28 2391qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
2392{
2393 int i;
2394 struct qla_msix_entry *qentry;
7c3df132 2395 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe 2396
73208dfd
AC
2397 for (i = 0; i < ha->msix_count; i++) {
2398 qentry = &ha->msix_entries[i];
a8488abe 2399 if (qentry->have_irq)
73208dfd 2400 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
2401 }
2402 pci_disable_msix(ha->pdev);
73208dfd
AC
2403 kfree(ha->msix_entries);
2404 ha->msix_entries = NULL;
2405 ha->flags.msix_enabled = 0;
7c3df132
SK
2406 ql_dbg(ql_dbg_init, vha, 0x0042,
2407 "Disabled the MSI.\n");
a8488abe
AV
2408}
2409
2410static int
73208dfd 2411qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe 2412{
ad038fa8 2413#define MIN_MSIX_COUNT 2
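/* One vector for the default queue plus at least one response queue vector. */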
a8488abe 2414 int i, ret;
73208dfd 2415 struct msix_entry *entries;
a8488abe 2416 struct qla_msix_entry *qentry;
7c3df132 2417 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2418
2419 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
a9083016 2420 GFP_KERNEL);
7c3df132
SK
2421 if (!entries) {
2422 ql_log(ql_log_warn, vha, 0x00bc,
2423 "Failed to allocate memory for msix_entry.\n");
73208dfd 2424 return -ENOMEM;
7c3df132 2425 }
a8488abe 2426
73208dfd
AC
2427 for (i = 0; i < ha->msix_count; i++)
2428 entries[i].entry = i;
a8488abe 2429
73208dfd 2430 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe 2431 if (ret) {
ad038fa8
LC
2432 if (ret < MIN_MSIX_COUNT)
2433 goto msix_failed;
2434
7c3df132
SK
2435 ql_log(ql_log_warn, vha, 0x00c6,
2436 "MSI-X: Failed to enable support "
2437 "-- %d/%d\n Retry with %d vectors.\n",
2438 ha->msix_count, ret, ret);
73208dfd
AC
2439 ha->msix_count = ret;
2440 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2441 if (ret) {
ad038fa8 2442msix_failed:
7c3df132
SK
2443 ql_log(ql_log_fatal, vha, 0x00c7,
2444 "MSI-X: Failed to enable support, "
2445 "giving up -- %d/%d.\n",
2446 ha->msix_count, ret);
73208dfd
AC
2447 goto msix_out;
2448 }
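 /*
  * One vector always services the default queue; only the remainder
  * can back additional response queues.
  */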
2afa19a9 2449 ha->max_rsp_queues = ha->msix_count - 1;
73208dfd
AC
2450 }
2451 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2452 ha->msix_count, GFP_KERNEL);
2453 if (!ha->msix_entries) {
7c3df132
SK
2454 ql_log(ql_log_fatal, vha, 0x00c8,
2455 "Failed to allocate memory for ha->msix_entries.\n");
73208dfd 2456 ret = -ENOMEM;
a8488abe
AV
2457 goto msix_out;
2458 }
2459 ha->flags.msix_enabled = 1;
2460
73208dfd
AC
2461 for (i = 0; i < ha->msix_count; i++) {
2462 qentry = &ha->msix_entries[i];
2463 qentry->vector = entries[i].vector;
2464 qentry->entry = entries[i].entry;
a8488abe 2465 qentry->have_irq = 0;
73208dfd 2466 qentry->rsp = NULL;
a8488abe
AV
2467 }
2468
2afa19a9
AC
2469 /* Enable MSI-X vectors for the base queue */
2470 for (i = 0; i < 2; i++) {
2471 qentry = &ha->msix_entries[i];
a9083016
GM
2472 if (IS_QLA82XX(ha)) {
2473 ret = request_irq(qentry->vector,
2474 qla82xx_msix_entries[i].handler,
2475 0, qla82xx_msix_entries[i].name, rsp);
2476 } else {
2477 ret = request_irq(qentry->vector,
2478 msix_entries[i].handler,
2479 0, msix_entries[i].name, rsp);
2480 }
2afa19a9 2481 if (ret) {
7c3df132
SK
2482 ql_log(ql_log_fatal, vha, 0x00cb,
2483 "MSI-X: unable to register handler -- %x/%d.\n",
2484 qentry->vector, ret);
2afa19a9
AC
2485 qla24xx_disable_msix(ha);
2486 ha->mqenable = 0;
2487 goto msix_out;
2488 }
2489 qentry->have_irq = 1;
2490 qentry->rsp = rsp;
2491 rsp->msix = qentry;
73208dfd 2492 }
73208dfd
AC
2493
2494 /* Enable multiqueue support when additional request/response queues are available */
2afa19a9 2495 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
73208dfd 2496 ha->mqenable = 1;
7c3df132
SK
2497 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2498 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2499 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2500 ql_dbg(ql_dbg_init, vha, 0x0055,
2501 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2502 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
73208dfd 2503
a8488abe 2504msix_out:
73208dfd 2505 kfree(entries);
a8488abe
AV
2506 return ret;
2507}
2508
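/*
 * Interrupt setup: try MSI-X where the ISP supports it, fall back to
 * MSI, and finally to a shared INTx line, then clear any stale
 * RISC/host interrupt state.
 */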
2509int
73208dfd 2510qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
2511{
2512 int ret;
963b0fdd 2513 device_reg_t __iomem *reg = ha->iobase;
7c3df132 2514 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
a8488abe
AV
2515
2516 /* If possible, enable MSI-X. */
3a03eb79 2517 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
a9083016 2518 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
6377a7ae
BH
2519 goto skip_msi;
2520
2521 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2522 (ha->pdev->subsystem_device == 0x7040 ||
2523 ha->pdev->subsystem_device == 0x7041 ||
2524 ha->pdev->subsystem_device == 0x1705)) {
7c3df132
SK
2525 ql_log(ql_log_warn, vha, 0x0034,
2526 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
6377a7ae 2527 ha->pdev->subsystem_vendor,
7c3df132 2528 ha->pdev->subsystem_device);
6377a7ae
BH
2529 goto skip_msi;
2530 }
a8488abe 2531
e315cd28
AC
2532 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2533 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
7c3df132
SK
2534 ql_log(ql_log_warn, vha, 0x0035,
2535 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2536 ha->pdev->revision, ha->fw_attributes);
a8488abe
AV
2537 goto skip_msix;
2538 }
2539
73208dfd 2540 ret = qla24xx_enable_msix(ha, rsp);
a8488abe 2541 if (!ret) {
7c3df132
SK
2542 ql_dbg(ql_dbg_init, vha, 0x0036,
2543 "MSI-X: Enabled (0x%X, 0x%X).\n",
2544 ha->chip_revision, ha->fw_attributes);
963b0fdd 2545 goto clear_risc_ints;
a8488abe 2546 }
7c3df132
SK
2547 ql_log(ql_log_info, vha, 0x0037,
2548 "MSI-X Falling back-to MSI mode -%d.\n", ret);
a8488abe 2549skip_msix:
cbedb601 2550
3a03eb79
AV
2551 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2552 !IS_QLA8001(ha))
cbedb601
AV
2553 goto skip_msi;
2554
2555 ret = pci_enable_msi(ha->pdev);
2556 if (!ret) {
7c3df132
SK
2557 ql_dbg(ql_dbg_init, vha, 0x0038,
2558 "MSI: Enabled.\n");
cbedb601 2559 ha->flags.msi_enabled = 1;
a9083016 2560 } else
7c3df132
SK
2561 ql_log(ql_log_warn, vha, 0x0039,
2562 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
cbedb601
AV
2563skip_msi:
2564
fd34f556 2565 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
7992abfc
MH
2566 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2567 QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 2568 if (ret) {
7c3df132 2569 ql_log(ql_log_warn, vha, 0x003a,
a8488abe
AV
2570 "Failed to reserve interrupt %d already in use.\n",
2571 ha->pdev->irq);
963b0fdd
AV
2572 goto fail;
2573 }
7992abfc 2574
963b0fdd
AV
2575clear_risc_ints:
2576
3a03eb79
AV
2577 /*
2578 * FIXME: Noted that 8014s were being dropped during NK testing.
2579 * Timing deltas during MSI-X/INTa transitions?
2580 */
a9083016 2581 if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
3a03eb79 2582 goto fail;
c6952483 2583 spin_lock_irq(&ha->hardware_lock);
963b0fdd
AV
2584 if (IS_FWI2_CAPABLE(ha)) {
2585 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2586 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2587 } else {
2588 WRT_REG_WORD(&reg->isp.semaphore, 0);
2589 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2590 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
a8488abe 2591 }
c6952483 2592 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2593
963b0fdd 2594fail:
a8488abe
AV
2595 return ret;
2596}
2597
2598void
e315cd28 2599qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2600{
e315cd28 2601 struct qla_hw_data *ha = vha->hw;
73208dfd 2602 struct rsp_que *rsp = ha->rsp_q_map[0];
a8488abe
AV
2603
2604 if (ha->flags.msix_enabled)
2605 qla24xx_disable_msix(ha);
90a86fc0 2606 else if (ha->flags.msi_enabled) {
e315cd28 2607 free_irq(ha->pdev->irq, rsp);
cbedb601 2608 pci_disable_msi(ha->pdev);
90a86fc0
JC
2609 } else
2610 free_irq(ha->pdev->irq, rsp);
a8488abe 2611}
e315cd28 2612
73208dfd
AC
2613
2614int qla25xx_request_irq(struct rsp_que *rsp)
2615{
2616 struct qla_hw_data *ha = rsp->hw;
2afa19a9 2617 struct qla_init_msix_entry *intr = &msix_entries[2];
73208dfd 2618 struct qla_msix_entry *msix = rsp->msix;
7c3df132 2619 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
73208dfd
AC
2620 int ret;
2621
2622 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2623 if (ret) {
7c3df132
SK
2624 ql_log(ql_log_fatal, vha, 0x00e6,
2625 "MSI-X: Unable to register handler -- %x/%d.\n",
2626 msix->vector, ret);
73208dfd
AC
2627 return ret;
2628 }
2629 msix->have_irq = 1;
2630 msix->rsp = rsp;
2631 return ret;
2632}