[SCSI] hpsa: add entry to MAINTAINERS
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_os.c
CommitLineData
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
01e58d8e 3 * Copyright (c) 2003-2008 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
8
9#include <linux/moduleparam.h>
10#include <linux/vmalloc.h>
1da177e4 11#include <linux/delay.h>
39a11240 12#include <linux/kthread.h>
e1e82b6f 13#include <linux/mutex.h>
3420d36c 14#include <linux/kobject.h>
5a0e3ad6 15#include <linux/slab.h>
1da177e4
LT
16
17#include <scsi/scsi_tcq.h>
18#include <scsi/scsicam.h>
19#include <scsi/scsi_transport.h>
20#include <scsi/scsi_transport_fc.h>
21
22/*
23 * Driver version
24 */
25char qla2x00_version_str[40];
26
6a03b4cd
HZ
27static int apidev_major;
28
1da177e4
LT
29/*
30 * SRB allocation cache
31 */
e18b890b 32static struct kmem_cache *srb_cachep;
1da177e4 33
a9083016
GM
34/*
35 * CT6 CTX allocation cache
36 */
37static struct kmem_cache *ctx_cachep;
38
1da177e4
LT
39int ql2xlogintimeout = 20;
40module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
41MODULE_PARM_DESC(ql2xlogintimeout,
42 "Login timeout value in seconds.");
43
a7b61842 44int qlport_down_retry;
1da177e4
LT
45module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
46MODULE_PARM_DESC(qlport_down_retry,
900d9f98 47 "Maximum number of command retries to a port that returns "
1da177e4
LT
48 "a PORT-DOWN status.");
49
1da177e4
LT
50int ql2xplogiabsentdevice;
51module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
52MODULE_PARM_DESC(ql2xplogiabsentdevice,
53 "Option to enable PLOGI to devices that are not present after "
900d9f98 54 "a Fabric scan. This is needed for several broken switches. "
1da177e4
LT
55 "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
56
1da177e4
LT
57int ql2xloginretrycount = 0;
58module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
59MODULE_PARM_DESC(ql2xloginretrycount,
60 "Specify an alternate value for the NVRAM login retry count.");
61
a7a167bf
AV
62int ql2xallocfwdump = 1;
63module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
64MODULE_PARM_DESC(ql2xallocfwdump,
65 "Option to enable allocation of memory for a firmware dump "
66 "during HBA initialization. Memory allocation requirements "
67 "vary by ISP type. Default is 1 - allocate memory.");
68
11010fec 69int ql2xextended_error_logging;
27d94035 70module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
11010fec 71MODULE_PARM_DESC(ql2xextended_error_logging,
0181944f
AV
72 "Option to enable extended error logging, "
73 "Default is 0 - no logging. 1 - log errors.");
74
a9083016
GM
75int ql2xshiftctondsd = 6;
76module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
77MODULE_PARM_DESC(ql2xshiftctondsd,
78 "Set to control shifting of command type processing "
79 "based on total number of SG elements.");
80
1da177e4
LT
81static void qla2x00_free_device(scsi_qla_host_t *);
82
7e47e5ca 83int ql2xfdmienable=1;
cca5335c
AV
84module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
85MODULE_PARM_DESC(ql2xfdmienable,
7794a5af
FW
86 "Enables FDMI registrations. "
87 "0 - no FDMI. Default is 1 - perform FDMI.");
cca5335c 88
df7baa50
AV
89#define MAX_Q_DEPTH 32
90static int ql2xmaxqdepth = MAX_Q_DEPTH;
91module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
92MODULE_PARM_DESC(ql2xmaxqdepth,
93 "Maximum queue depth to report for target devices.");
94
bad75002
AE
95/* Do not change the value of this after module load */
96int ql2xenabledif = 1;
97module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
98MODULE_PARM_DESC(ql2xenabledif,
99 " Enable T10-CRC-DIF "
100 " Default is 0 - No DIF Support. 1 - Enable it");
101
102int ql2xenablehba_err_chk;
103module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
104MODULE_PARM_DESC(ql2xenablehba_err_chk,
105 " Enable T10-CRC-DIF Error isolation by HBA"
106 " Default is 0 - Error isolation disabled, 1 - Enable it");
107
e5896bd5
AV
108int ql2xiidmaenable=1;
109module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
110MODULE_PARM_DESC(ql2xiidmaenable,
111 "Enables iIDMA settings "
112 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
113
73208dfd
AC
114int ql2xmaxqueues = 1;
115module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
116MODULE_PARM_DESC(ql2xmaxqueues,
117 "Enables MQ settings "
118 "Default is 1 for single queue. Set it to number \
119 of queues in MQ mode.");
68ca949c
AC
120
121int ql2xmultique_tag;
122module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
123MODULE_PARM_DESC(ql2xmultique_tag,
124 "Enables CPU affinity settings for the driver "
125 "Default is 0 for no affinity of request and response IO. "
126 "Set it to 1 to turn on the cpu affinity.");
e337d907
AV
127
128int ql2xfwloadbin;
129module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
130MODULE_PARM_DESC(ql2xfwloadbin,
131 "Option to specify location from which to load ISP firmware:\n"
132 " 2 -- load firmware via the request_firmware() (hotplug)\n"
133 " interface.\n"
134 " 1 -- load firmware from flash.\n"
135 " 0 -- use default semantics.\n");
136
ae97c91e
AV
137int ql2xetsenable;
138module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
139MODULE_PARM_DESC(ql2xetsenable,
140 "Enables firmware ETS burst."
141 "Default is 0 - skip ETS enablement.");
142
a9083016
GM
143int ql2xdbwr;
144module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
145MODULE_PARM_DESC(ql2xdbwr,
146 "Option to specify scheme for request queue posting\n"
147 " 0 -- Regular doorbell.\n"
148 " 1 -- CAMRAM doorbell (faster).\n");
149
150int ql2xdontresethba;
151module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
152MODULE_PARM_DESC(ql2xdontresethba,
153 "Option to specify reset behaviour\n"
154 " 0 (Default) -- Reset on failure.\n"
155 " 1 -- Do not reset on failure.\n");
156
f4c496c1
GM
157int ql2xtargetreset = 1;
158module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
159MODULE_PARM_DESC(ql2xtargetreset,
160 "Enable target reset."
161 "Default is 1 - use hw defaults.");
162
a9083016 163
3822263e
MI
164int ql2xasynctmfenable;
165module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
166MODULE_PARM_DESC(ql2xasynctmfenable,
167 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
168 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
1da177e4 169/*
fa2a1ce5 170 * SCSI host template entry points
1da177e4
LT
171 */
172static int qla2xxx_slave_configure(struct scsi_device * device);
f4f051eb 173static int qla2xxx_slave_alloc(struct scsi_device *);
1e99e33a
AV
174static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
175static void qla2xxx_scan_start(struct Scsi_Host *);
f4f051eb 176static void qla2xxx_slave_destroy(struct scsi_device *);
a5326f86 177static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
fca29703 178 void (*fn)(struct scsi_cmnd *));
1da177e4
LT
179static int qla2xxx_eh_abort(struct scsi_cmnd *);
180static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
523ec773 181static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
1da177e4
LT
182static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
183static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
1da177e4 184
e881a172 185static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
ce7e4af7
AV
186static int qla2x00_change_queue_type(struct scsi_device *, int);
187
a5326f86 188struct scsi_host_template qla2xxx_driver_template = {
1da177e4 189 .module = THIS_MODULE,
cb63067a 190 .name = QLA2XXX_DRIVER_NAME,
a5326f86 191 .queuecommand = qla2xxx_queuecommand,
fca29703
AV
192
193 .eh_abort_handler = qla2xxx_eh_abort,
194 .eh_device_reset_handler = qla2xxx_eh_device_reset,
523ec773 195 .eh_target_reset_handler = qla2xxx_eh_target_reset,
fca29703
AV
196 .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
197 .eh_host_reset_handler = qla2xxx_eh_host_reset,
198
199 .slave_configure = qla2xxx_slave_configure,
200
201 .slave_alloc = qla2xxx_slave_alloc,
202 .slave_destroy = qla2xxx_slave_destroy,
ed677086
AV
203 .scan_finished = qla2xxx_scan_finished,
204 .scan_start = qla2xxx_scan_start,
ce7e4af7
AV
205 .change_queue_depth = qla2x00_change_queue_depth,
206 .change_queue_type = qla2x00_change_queue_type,
fca29703
AV
207 .this_id = -1,
208 .cmd_per_lun = 3,
209 .use_clustering = ENABLE_CLUSTERING,
210 .sg_tablesize = SG_ALL,
211
212 .max_sectors = 0xFFFF,
afb046e2 213 .shost_attrs = qla2x00_host_attrs,
fca29703
AV
214};
215
1da177e4 216static struct scsi_transport_template *qla2xxx_transport_template = NULL;
2c3dfe3f 217struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
1da177e4 218
1da177e4
LT
219/* TODO Convert to inlines
220 *
221 * Timer routines
222 */
1da177e4 223
2c3dfe3f 224__inline__ void
e315cd28 225qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
1da177e4 226{
e315cd28
AC
227 init_timer(&vha->timer);
228 vha->timer.expires = jiffies + interval * HZ;
229 vha->timer.data = (unsigned long)vha;
230 vha->timer.function = (void (*)(unsigned long))func;
231 add_timer(&vha->timer);
232 vha->timer_active = 1;
1da177e4
LT
233}
234
235static inline void
e315cd28 236qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
1da177e4 237{
a9083016
GM
238 /* Currently used for 82XX only. */
239 if (vha->device_flags & DFLG_DEV_FAILED)
240 return;
241
e315cd28 242 mod_timer(&vha->timer, jiffies + interval * HZ);
1da177e4
LT
243}
244
a824ebb3 245static __inline__ void
e315cd28 246qla2x00_stop_timer(scsi_qla_host_t *vha)
1da177e4 247{
e315cd28
AC
248 del_timer_sync(&vha->timer);
249 vha->timer_active = 0;
1da177e4
LT
250}
251
1da177e4
LT
252static int qla2x00_do_dpc(void *data);
253
254static void qla2x00_rst_aen(scsi_qla_host_t *);
255
73208dfd
AC
256static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
257 struct req_que **, struct rsp_que **);
e315cd28
AC
258static void qla2x00_mem_free(struct qla_hw_data *);
259static void qla2x00_sp_free_dma(srb_t *);
1da177e4 260
1da177e4 261/* -------------------------------------------------------------------------- */
73208dfd
AC
262static int qla2x00_alloc_queues(struct qla_hw_data *ha)
263{
2afa19a9 264 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
73208dfd
AC
265 GFP_KERNEL);
266 if (!ha->req_q_map) {
267 qla_printk(KERN_WARNING, ha,
268 "Unable to allocate memory for request queue ptrs\n");
269 goto fail_req_map;
270 }
271
2afa19a9 272 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
73208dfd
AC
273 GFP_KERNEL);
274 if (!ha->rsp_q_map) {
275 qla_printk(KERN_WARNING, ha,
276 "Unable to allocate memory for response queue ptrs\n");
277 goto fail_rsp_map;
278 }
279 set_bit(0, ha->rsp_qid_map);
280 set_bit(0, ha->req_qid_map);
281 return 1;
282
283fail_rsp_map:
284 kfree(ha->req_q_map);
285 ha->req_q_map = NULL;
286fail_req_map:
287 return -ENOMEM;
288}
289
2afa19a9 290static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
73208dfd 291{
73208dfd
AC
292 if (req && req->ring)
293 dma_free_coherent(&ha->pdev->dev,
294 (req->length + 1) * sizeof(request_t),
295 req->ring, req->dma);
296
297 kfree(req);
298 req = NULL;
299}
300
2afa19a9
AC
301static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
302{
303 if (rsp && rsp->ring)
304 dma_free_coherent(&ha->pdev->dev,
305 (rsp->length + 1) * sizeof(response_t),
306 rsp->ring, rsp->dma);
307
308 kfree(rsp);
309 rsp = NULL;
310}
311
73208dfd
AC
312static void qla2x00_free_queues(struct qla_hw_data *ha)
313{
314 struct req_que *req;
315 struct rsp_que *rsp;
316 int cnt;
317
2afa19a9 318 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
73208dfd 319 req = ha->req_q_map[cnt];
2afa19a9 320 qla2x00_free_req_que(ha, req);
73208dfd 321 }
73208dfd
AC
322 kfree(ha->req_q_map);
323 ha->req_q_map = NULL;
2afa19a9
AC
324
325 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
326 rsp = ha->rsp_q_map[cnt];
327 qla2x00_free_rsp_que(ha, rsp);
328 }
329 kfree(ha->rsp_q_map);
330 ha->rsp_q_map = NULL;
73208dfd
AC
331}
332
68ca949c
AC
333static int qla25xx_setup_mode(struct scsi_qla_host *vha)
334{
335 uint16_t options = 0;
336 int ques, req, ret;
337 struct qla_hw_data *ha = vha->hw;
338
7163ea81
AC
339 if (!(ha->fw_attributes & BIT_6)) {
340 qla_printk(KERN_INFO, ha,
341 "Firmware is not multi-queue capable\n");
342 goto fail;
343 }
68ca949c 344 if (ql2xmultique_tag) {
68ca949c
AC
345 /* create a request queue for IO */
346 options |= BIT_7;
347 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
348 QLA_DEFAULT_QUE_QOS);
349 if (!req) {
350 qla_printk(KERN_WARNING, ha,
351 "Can't create request queue\n");
352 goto fail;
353 }
7163ea81 354 ha->wq = create_workqueue("qla2xxx_wq");
68ca949c
AC
355 vha->req = ha->req_q_map[req];
356 options |= BIT_1;
357 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
358 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
359 if (!ret) {
360 qla_printk(KERN_WARNING, ha,
361 "Response Queue create failed\n");
362 goto fail2;
363 }
364 }
7163ea81
AC
365 ha->flags.cpu_affinity_enabled = 1;
366
68ca949c
AC
367 DEBUG2(qla_printk(KERN_INFO, ha,
368 "CPU affinity mode enabled, no. of response"
369 " queues:%d, no. of request queues:%d\n",
370 ha->max_rsp_queues, ha->max_req_queues));
371 }
372 return 0;
373fail2:
374 qla25xx_delete_queues(vha);
7163ea81
AC
375 destroy_workqueue(ha->wq);
376 ha->wq = NULL;
68ca949c
AC
377fail:
378 ha->mqenable = 0;
7163ea81
AC
379 kfree(ha->req_q_map);
380 kfree(ha->rsp_q_map);
381 ha->max_req_queues = ha->max_rsp_queues = 1;
68ca949c
AC
382 return 1;
383}
384
1da177e4 385static char *
e315cd28 386qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
1da177e4 387{
e315cd28 388 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
389 static char *pci_bus_modes[] = {
390 "33", "66", "100", "133",
391 };
392 uint16_t pci_bus;
393
394 strcpy(str, "PCI");
395 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
396 if (pci_bus) {
397 strcat(str, "-X (");
398 strcat(str, pci_bus_modes[pci_bus]);
399 } else {
400 pci_bus = (ha->pci_attr & BIT_8) >> 8;
401 strcat(str, " (");
402 strcat(str, pci_bus_modes[pci_bus]);
403 }
404 strcat(str, " MHz)");
405
406 return (str);
407}
408
fca29703 409static char *
e315cd28 410qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
fca29703
AV
411{
412 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
e315cd28 413 struct qla_hw_data *ha = vha->hw;
fca29703
AV
414 uint32_t pci_bus;
415 int pcie_reg;
416
417 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
418 if (pcie_reg) {
419 char lwstr[6];
420 uint16_t pcie_lstat, lspeed, lwidth;
421
422 pcie_reg += 0x12;
423 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
424 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
425 lwidth = (pcie_lstat &
426 (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
427
428 strcpy(str, "PCIe (");
429 if (lspeed == 1)
c87a0d8c 430 strcat(str, "2.5GT/s ");
c3a2f0df 431 else if (lspeed == 2)
c87a0d8c 432 strcat(str, "5.0GT/s ");
fca29703
AV
433 else
434 strcat(str, "<unknown> ");
435 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
436 strcat(str, lwstr);
437
438 return str;
439 }
440
441 strcpy(str, "PCI");
442 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
443 if (pci_bus == 0 || pci_bus == 8) {
444 strcat(str, " (");
445 strcat(str, pci_bus_modes[pci_bus >> 3]);
446 } else {
447 strcat(str, "-X ");
448 if (pci_bus & BIT_2)
449 strcat(str, "Mode 2");
450 else
451 strcat(str, "Mode 1");
452 strcat(str, " (");
453 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
454 }
455 strcat(str, " MHz)");
456
457 return str;
458}
459
e5f82ab8 460static char *
e315cd28 461qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
1da177e4
LT
462{
463 char un_str[10];
e315cd28 464 struct qla_hw_data *ha = vha->hw;
fa2a1ce5 465
1da177e4
LT
466 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
467 ha->fw_minor_version,
468 ha->fw_subminor_version);
469
470 if (ha->fw_attributes & BIT_9) {
471 strcat(str, "FLX");
472 return (str);
473 }
474
475 switch (ha->fw_attributes & 0xFF) {
476 case 0x7:
477 strcat(str, "EF");
478 break;
479 case 0x17:
480 strcat(str, "TP");
481 break;
482 case 0x37:
483 strcat(str, "IP");
484 break;
485 case 0x77:
486 strcat(str, "VI");
487 break;
488 default:
489 sprintf(un_str, "(%x)", ha->fw_attributes);
490 strcat(str, un_str);
491 break;
492 }
493 if (ha->fw_attributes & 0x100)
494 strcat(str, "X");
495
496 return (str);
497}
498
e5f82ab8 499static char *
e315cd28 500qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
fca29703 501{
e315cd28 502 struct qla_hw_data *ha = vha->hw;
f0883ac6 503
3a03eb79
AV
504 sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
505 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
fca29703 506 return str;
fca29703
AV
507}
508
509static inline srb_t *
e315cd28 510qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
fca29703
AV
511 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
512{
513 srb_t *sp;
e315cd28 514 struct qla_hw_data *ha = vha->hw;
fca29703
AV
515
516 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
517 if (!sp)
518 return sp;
519
fca29703
AV
520 sp->fcport = fcport;
521 sp->cmd = cmd;
522 sp->flags = 0;
523 CMD_SP(cmd) = (void *)sp;
524 cmd->scsi_done = done;
cf53b069 525 sp->ctx = NULL;
fca29703
AV
526
527 return sp;
528}
529
1da177e4 530static int
a5326f86 531qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
fca29703 532{
e315cd28 533 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fca29703 534 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
19a7b4ae 535 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
e315cd28
AC
536 struct qla_hw_data *ha = vha->hw;
537 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
fca29703
AV
538 srb_t *sp;
539 int rval;
540
85880801
AV
541 if (ha->flags.eeh_busy) {
542 if (ha->flags.pci_channel_io_perm_failure)
b9b12f73 543 cmd->result = DID_NO_CONNECT << 16;
85880801
AV
544 else
545 cmd->result = DID_REQUEUE << 16;
14e660e6
SJ
546 goto qc24_fail_command;
547 }
548
19a7b4ae
JSEC
549 rval = fc_remote_port_chkready(rport);
550 if (rval) {
551 cmd->result = rval;
fca29703
AV
552 goto qc24_fail_command;
553 }
554
387f96b4 555 /* Close window on fcport/rport state-transitioning. */
7b594131
MC
556 if (fcport->drport)
557 goto qc24_target_busy;
387f96b4 558
bad75002
AE
559 if (!vha->flags.difdix_supported &&
560 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
561 DEBUG2(qla_printk(KERN_ERR, ha,
562 "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
563 cmd->cmnd[0]));
564 cmd->result = DID_NO_CONNECT << 16;
565 goto qc24_fail_command;
566 }
fca29703
AV
567 if (atomic_read(&fcport->state) != FCS_ONLINE) {
568 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
e315cd28 569 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
fca29703
AV
570 cmd->result = DID_NO_CONNECT << 16;
571 goto qc24_fail_command;
572 }
7b594131 573 goto qc24_target_busy;
fca29703
AV
574 }
575
e315cd28 576 spin_unlock_irq(vha->host->host_lock);
fca29703 577
e315cd28 578 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
fca29703
AV
579 if (!sp)
580 goto qc24_host_busy_lock;
581
e315cd28 582 rval = ha->isp_ops->start_scsi(sp);
fca29703
AV
583 if (rval != QLA_SUCCESS)
584 goto qc24_host_busy_free_sp;
585
e315cd28 586 spin_lock_irq(vha->host->host_lock);
fca29703
AV
587
588 return 0;
589
590qc24_host_busy_free_sp:
e315cd28
AC
591 qla2x00_sp_free_dma(sp);
592 mempool_free(sp, ha->srb_mempool);
fca29703
AV
593
594qc24_host_busy_lock:
e315cd28 595 spin_lock_irq(vha->host->host_lock);
fca29703
AV
596 return SCSI_MLQUEUE_HOST_BUSY;
597
7b594131
MC
598qc24_target_busy:
599 return SCSI_MLQUEUE_TARGET_BUSY;
600
fca29703
AV
601qc24_fail_command:
602 done(cmd);
603
604 return 0;
605}
606
607
1da177e4
LT
608/*
609 * qla2x00_eh_wait_on_command
610 * Waits for the command to be returned by the Firmware for some
611 * max time.
612 *
613 * Input:
1da177e4 614 * cmd = Scsi Command to wait on.
1da177e4
LT
615 *
616 * Return:
617 * Not Found : 0
618 * Found : 1
619 */
620static int
e315cd28 621qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
1da177e4 622{
fe74c71f
AV
623#define ABORT_POLLING_PERIOD 1000
624#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
f4f051eb 625 unsigned long wait_iter = ABORT_WAIT_ITER;
85880801
AV
626 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
627 struct qla_hw_data *ha = vha->hw;
f4f051eb 628 int ret = QLA_SUCCESS;
1da177e4 629
85880801
AV
630 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
631 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
632 return ret;
633 }
634
d970432c 635 while (CMD_SP(cmd) && wait_iter--) {
fe74c71f 636 msleep(ABORT_POLLING_PERIOD);
f4f051eb 637 }
638 if (CMD_SP(cmd))
639 ret = QLA_FUNCTION_FAILED;
1da177e4 640
f4f051eb 641 return ret;
1da177e4
LT
642}
643
644/*
645 * qla2x00_wait_for_hba_online
fa2a1ce5 646 * Wait till the HBA is online after going through
1da177e4
LT
647 * <= MAX_RETRIES_OF_ISP_ABORT or
648 * finally HBA is disabled ie marked offline
649 *
650 * Input:
651 * ha - pointer to host adapter structure
fa2a1ce5
AV
652 *
653 * Note:
1da177e4
LT
654 * Does context switching-Release SPIN_LOCK
655 * (if any) before calling this routine.
656 *
657 * Return:
658 * Success (Adapter is online) : 0
659 * Failed (Adapter is offline/disabled) : 1
660 */
854165f4 661int
e315cd28 662qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
1da177e4 663{
fca29703
AV
664 int return_status;
665 unsigned long wait_online;
e315cd28
AC
666 struct qla_hw_data *ha = vha->hw;
667 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4 668
fa2a1ce5 669 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
e315cd28
AC
670 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
671 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
672 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
673 ha->dpc_active) && time_before(jiffies, wait_online)) {
1da177e4
LT
674
675 msleep(1000);
676 }
e315cd28 677 if (base_vha->flags.online)
fa2a1ce5 678 return_status = QLA_SUCCESS;
1da177e4
LT
679 else
680 return_status = QLA_FUNCTION_FAILED;
681
1da177e4
LT
682 return (return_status);
683}
684
86fbee86
LC
685/*
686 * qla2x00_wait_for_reset_ready
687 * Wait till the HBA is online after going through
688 * <= MAX_RETRIES_OF_ISP_ABORT or
689 * finally HBA is disabled ie marked offline or flash
690 * operations are in progress.
691 *
692 * Input:
693 * ha - pointer to host adapter structure
694 *
695 * Note:
696 * Does context switching-Release SPIN_LOCK
697 * (if any) before calling this routine.
698 *
699 * Return:
700 * Success (Adapter is online/no flash ops) : 0
701 * Failed (Adapter is offline/disabled/flash ops in progress) : 1
702 */
703int
704qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
705{
706 int return_status;
707 unsigned long wait_online;
708 struct qla_hw_data *ha = vha->hw;
709 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
710
711 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
712 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
713 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
714 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
715 ha->optrom_state != QLA_SWAITING ||
716 ha->dpc_active) && time_before(jiffies, wait_online))
717 msleep(1000);
718
719 if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
720 return_status = QLA_SUCCESS;
721 else
722 return_status = QLA_FUNCTION_FAILED;
723
724 DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
725
726 return return_status;
727}
728
2533cf67
LC
729int
730qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
731{
732 int return_status;
733 unsigned long wait_reset;
734 struct qla_hw_data *ha = vha->hw;
735 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
736
737 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
738 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
739 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
740 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
741 ha->dpc_active) && time_before(jiffies, wait_reset)) {
742
743 msleep(1000);
744
745 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
746 ha->flags.chip_reset_done)
747 break;
748 }
749 if (ha->flags.chip_reset_done)
750 return_status = QLA_SUCCESS;
751 else
752 return_status = QLA_FUNCTION_FAILED;
753
754 return return_status;
755}
756
1da177e4
LT
757/*
758 * qla2x00_wait_for_loop_ready
759 * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
fa2a1ce5 760 * to be in LOOP_READY state.
1da177e4
LT
761 * Input:
762 * ha - pointer to host adapter structure
fa2a1ce5
AV
763 *
764 * Note:
1da177e4
LT
765 * Does context switching-Release SPIN_LOCK
766 * (if any) before calling this routine.
fa2a1ce5 767 *
1da177e4
LT
768 *
769 * Return:
770 * Success (LOOP_READY) : 0
771 * Failed (LOOP_NOT_READY) : 1
772 */
fa2a1ce5 773static inline int
e315cd28 774qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
1da177e4
LT
775{
776 int return_status = QLA_SUCCESS;
777 unsigned long loop_timeout ;
e315cd28
AC
778 struct qla_hw_data *ha = vha->hw;
779 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4
LT
780
781 /* wait for 5 min at the max for loop to be ready */
fa2a1ce5 782 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
1da177e4 783
e315cd28
AC
784 while ((!atomic_read(&base_vha->loop_down_timer) &&
785 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
786 atomic_read(&base_vha->loop_state) != LOOP_READY) {
787 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
57680080
RA
788 return_status = QLA_FUNCTION_FAILED;
789 break;
790 }
1da177e4
LT
791 msleep(1000);
792 if (time_after_eq(jiffies, loop_timeout)) {
793 return_status = QLA_FUNCTION_FAILED;
794 break;
795 }
796 }
fa2a1ce5 797 return (return_status);
1da177e4
LT
798}
799
800/**************************************************************************
801* qla2xxx_eh_abort
802*
803* Description:
804* The abort function will abort the specified command.
805*
806* Input:
807* cmd = Linux SCSI command packet to be aborted.
808*
809* Returns:
810* Either SUCCESS or FAILED.
811*
812* Note:
2ea00202 813* Only return FAILED if command not returned by firmware.
1da177e4 814**************************************************************************/
e5f82ab8 815static int
1da177e4
LT
816qla2xxx_eh_abort(struct scsi_cmnd *cmd)
817{
e315cd28 818 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
f4f051eb 819 srb_t *sp;
17d98630 820 int ret, i;
f4f051eb 821 unsigned int id, lun;
822 unsigned long serial;
18e144d3 823 unsigned long flags;
2ea00202 824 int wait = 0;
e315cd28 825 struct qla_hw_data *ha = vha->hw;
67c2e93a 826 struct req_que *req = vha->req;
17d98630 827 srb_t *spt;
1da177e4 828
65d430fa 829 fc_block_scsi_eh(cmd);
07db5183 830
f4f051eb 831 if (!CMD_SP(cmd))
2ea00202 832 return SUCCESS;
1da177e4 833
2ea00202 834 ret = SUCCESS;
1da177e4 835
f4f051eb 836 id = cmd->device->id;
837 lun = cmd->device->lun;
838 serial = cmd->serial_number;
17d98630
AC
839 spt = (srb_t *) CMD_SP(cmd);
840 if (!spt)
841 return SUCCESS;
1da177e4 842
f4f051eb 843 /* Check active list for command command. */
e315cd28 844 spin_lock_irqsave(&ha->hardware_lock, flags);
17d98630
AC
845 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
846 sp = req->outstanding_cmds[i];
1da177e4 847
17d98630
AC
848 if (sp == NULL)
849 continue;
bad75002
AE
850 if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
851 !IS_PROT_IO(sp))
cf53b069 852 continue;
17d98630
AC
853 if (sp->cmd != cmd)
854 continue;
1da177e4 855
17d98630
AC
856 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
857 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
858
859 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2afa19a9 860 if (ha->isp_ops->abort_command(sp)) {
17d98630
AC
861 DEBUG2(printk("%s(%ld): abort_command "
862 "mbx failed.\n", __func__, vha->host_no));
2ac4b64f 863 ret = FAILED;
17d98630
AC
864 } else {
865 DEBUG3(printk("%s(%ld): abort_command "
866 "mbx success.\n", __func__, vha->host_no));
867 wait = 1;
73208dfd 868 }
17d98630
AC
869 spin_lock_irqsave(&ha->hardware_lock, flags);
870 break;
f4f051eb 871 }
e315cd28 872 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4 873
f4f051eb 874 /* Wait for the command to be returned. */
2ea00202 875 if (wait) {
e315cd28 876 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
fa2a1ce5 877 qla_printk(KERN_ERR, ha,
f4f051eb 878 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
e315cd28 879 "%x.\n", vha->host_no, id, lun, serial, ret);
2ea00202 880 ret = FAILED;
f4f051eb 881 }
1da177e4 882 }
1da177e4 883
fa2a1ce5 884 qla_printk(KERN_INFO, ha,
2ea00202 885 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
e315cd28 886 vha->host_no, id, lun, wait, serial, ret);
1da177e4 887
f4f051eb 888 return ret;
889}
1da177e4 890
523ec773
AV
891enum nexus_wait_type {
892 WAIT_HOST = 0,
893 WAIT_TARGET,
894 WAIT_LUN,
895};
896
f4f051eb 897static int
e315cd28 898qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
17d98630 899 unsigned int l, srb_t *sp, enum nexus_wait_type type)
f4f051eb 900{
17d98630 901 int cnt, match, status;
18e144d3 902 unsigned long flags;
e315cd28 903 struct qla_hw_data *ha = vha->hw;
73208dfd 904 struct req_que *req;
1da177e4 905
523ec773 906 status = QLA_SUCCESS;
17d98630
AC
907 if (!sp)
908 return status;
909
e315cd28 910 spin_lock_irqsave(&ha->hardware_lock, flags);
67c2e93a 911 req = vha->req;
17d98630
AC
912 for (cnt = 1; status == QLA_SUCCESS &&
913 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
914 sp = req->outstanding_cmds[cnt];
915 if (!sp)
523ec773 916 continue;
bad75002 917 if ((sp->ctx) && !IS_PROT_IO(sp))
cf53b069 918 continue;
17d98630
AC
919 if (vha->vp_idx != sp->fcport->vha->vp_idx)
920 continue;
921 match = 0;
922 switch (type) {
923 case WAIT_HOST:
924 match = 1;
925 break;
926 case WAIT_TARGET:
927 match = sp->cmd->device->id == t;
928 break;
929 case WAIT_LUN:
930 match = (sp->cmd->device->id == t &&
931 sp->cmd->device->lun == l);
932 break;
73208dfd 933 }
17d98630
AC
934 if (!match)
935 continue;
936
937 spin_unlock_irqrestore(&ha->hardware_lock, flags);
938 status = qla2x00_eh_wait_on_command(sp->cmd);
939 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 940 }
e315cd28 941 spin_unlock_irqrestore(&ha->hardware_lock, flags);
523ec773
AV
942
943 return status;
1da177e4
LT
944}
945
a9083016
GM
946void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
947{
948 int cnt;
949 srb_t *sp;
950 struct req_que *req = vha->req;
951
952 DEBUG2(qla_printk(KERN_INFO, vha->hw,
953 "Waiting for pending commands\n"));
954 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
955 sp = req->outstanding_cmds[cnt];
956 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
957 sp, WAIT_HOST) == QLA_SUCCESS) {
958 DEBUG2(qla_printk(KERN_INFO, vha->hw,
959 "Done wait for pending commands\n"));
960 }
961 }
962}
963
523ec773
AV
964static char *reset_errors[] = {
965 "HBA not online",
966 "HBA not ready",
967 "Task management failed",
968 "Waiting for command completions",
969};
1da177e4 970
e5f82ab8 971static int
523ec773 972__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
2afa19a9 973 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
1da177e4 974{
e315cd28 975 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
bdf79621 976 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
523ec773 977 int err;
1da177e4 978
65d430fa 979 fc_block_scsi_eh(cmd);
07db5183 980
b0328bee 981 if (!fcport)
523ec773 982 return FAILED;
1da177e4 983
e315cd28
AC
984 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
985 vha->host_no, cmd->device->id, cmd->device->lun, name);
1da177e4 986
523ec773 987 err = 0;
e315cd28 988 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
523ec773
AV
989 goto eh_reset_failed;
990 err = 1;
e315cd28 991 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
523ec773
AV
992 goto eh_reset_failed;
993 err = 2;
2afa19a9
AC
994 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
995 != QLA_SUCCESS)
523ec773
AV
996 goto eh_reset_failed;
997 err = 3;
e315cd28 998 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
17d98630 999 cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
523ec773
AV
1000 goto eh_reset_failed;
1001
e315cd28
AC
1002 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
1003 vha->host_no, cmd->device->id, cmd->device->lun, name);
523ec773
AV
1004
1005 return SUCCESS;
1006
1007 eh_reset_failed:
e315cd28
AC
1008 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
1009 , vha->host_no, cmd->device->id, cmd->device->lun, name,
523ec773
AV
1010 reset_errors[err]);
1011 return FAILED;
1012}
1da177e4 1013
523ec773
AV
1014static int
1015qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
1016{
e315cd28
AC
1017 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1018 struct qla_hw_data *ha = vha->hw;
1da177e4 1019
523ec773
AV
1020 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
1021 ha->isp_ops->lun_reset);
1da177e4
LT
1022}
1023
1da177e4 1024static int
523ec773 1025qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
1da177e4 1026{
e315cd28
AC
1027 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1028 struct qla_hw_data *ha = vha->hw;
1da177e4 1029
523ec773
AV
1030 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
1031 ha->isp_ops->target_reset);
1da177e4
LT
1032}
1033
1da177e4
LT
1034/**************************************************************************
1035* qla2xxx_eh_bus_reset
1036*
1037* Description:
1038* The bus reset function will reset the bus and abort any executing
1039* commands.
1040*
1041* Input:
1042* cmd = Linux SCSI command packet of the command that cause the
1043* bus reset.
1044*
1045* Returns:
1046* SUCCESS/FAILURE (defined as macro in scsi.h).
1047*
1048**************************************************************************/
e5f82ab8 1049static int
1da177e4
LT
1050qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1051{
e315cd28 1052 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
bdf79621 1053 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
2c3dfe3f 1054 int ret = FAILED;
f4f051eb 1055 unsigned int id, lun;
1056 unsigned long serial;
17d98630 1057 srb_t *sp = (srb_t *) CMD_SP(cmd);
f4f051eb 1058
65d430fa 1059 fc_block_scsi_eh(cmd);
07db5183 1060
f4f051eb 1061 id = cmd->device->id;
1062 lun = cmd->device->lun;
1063 serial = cmd->serial_number;
1da177e4 1064
b0328bee 1065 if (!fcport)
f4f051eb 1066 return ret;
1da177e4 1067
e315cd28 1068 qla_printk(KERN_INFO, vha->hw,
749af3d5 1069 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
1da177e4 1070
e315cd28 1071 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1da177e4 1072 DEBUG2(printk("%s failed:board disabled\n",__func__));
f4f051eb 1073 goto eh_bus_reset_done;
1da177e4
LT
1074 }
1075
e315cd28
AC
1076 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
1077 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
f4f051eb 1078 ret = SUCCESS;
1da177e4 1079 }
f4f051eb 1080 if (ret == FAILED)
1081 goto eh_bus_reset_done;
1da177e4 1082
9a41a62b 1083 /* Flush outstanding commands. */
17d98630 1084 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
523ec773 1085 QLA_SUCCESS)
9a41a62b 1086 ret = FAILED;
1da177e4 1087
f4f051eb 1088eh_bus_reset_done:
e315cd28 1089 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
f4f051eb 1090 (ret == FAILED) ? "failed" : "succeded");
1da177e4 1091
f4f051eb 1092 return ret;
1da177e4
LT
1093}
1094
1095/**************************************************************************
1096* qla2xxx_eh_host_reset
1097*
1098* Description:
1099* The reset function will reset the Adapter.
1100*
1101* Input:
1102* cmd = Linux SCSI command packet of the command that cause the
1103* adapter reset.
1104*
1105* Returns:
1106* Either SUCCESS or FAILED.
1107*
1108* Note:
1109**************************************************************************/
e5f82ab8 1110static int
1da177e4
LT
1111qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1112{
e315cd28 1113 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
bdf79621 1114 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
e315cd28 1115 struct qla_hw_data *ha = vha->hw;
2c3dfe3f 1116 int ret = FAILED;
f4f051eb 1117 unsigned int id, lun;
1118 unsigned long serial;
17d98630 1119 srb_t *sp = (srb_t *) CMD_SP(cmd);
e315cd28 1120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4 1121
65d430fa 1122 fc_block_scsi_eh(cmd);
07db5183 1123
f4f051eb 1124 id = cmd->device->id;
1125 lun = cmd->device->lun;
1126 serial = cmd->serial_number;
1127
b0328bee 1128 if (!fcport)
f4f051eb 1129 return ret;
1da177e4 1130
1da177e4 1131 qla_printk(KERN_INFO, ha,
e315cd28 1132 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
1da177e4 1133
86fbee86 1134 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
f4f051eb 1135 goto eh_host_reset_lock;
1da177e4
LT
1136
1137 /*
1138 * Fixme-may be dpc thread is active and processing
fa2a1ce5 1139 * loop_resync,so wait a while for it to
1da177e4
LT
1140 * be completed and then issue big hammer.Otherwise
1141 * it may cause I/O failure as big hammer marks the
1142 * devices as lost kicking of the port_down_timer
1143 * while dpc is stuck for the mailbox to complete.
1144 */
e315cd28
AC
1145 qla2x00_wait_for_loop_ready(vha);
1146 if (vha != base_vha) {
1147 if (qla2x00_vp_abort_isp(vha))
f4f051eb 1148 goto eh_host_reset_lock;
e315cd28 1149 } else {
a9083016
GM
1150 if (IS_QLA82XX(vha->hw)) {
1151 if (!qla82xx_fcoe_ctx_reset(vha)) {
1152 /* Ctx reset success */
1153 ret = SUCCESS;
1154 goto eh_host_reset_lock;
1155 }
1156 /* fall thru if ctx reset failed */
1157 }
68ca949c
AC
1158 if (ha->wq)
1159 flush_workqueue(ha->wq);
1160
e315cd28 1161 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
a9083016 1162 if (ha->isp_ops->abort_isp(base_vha)) {
e315cd28
AC
1163 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1164 /* failed. schedule dpc to try */
1165 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1166
1167 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1168 goto eh_host_reset_lock;
1169 }
1170 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
fa2a1ce5 1171 }
1da177e4 1172
e315cd28 1173 /* Waiting for command to be returned to OS.*/
17d98630 1174 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
e315cd28 1175 QLA_SUCCESS)
f4f051eb 1176 ret = SUCCESS;
1da177e4 1177
f4f051eb 1178eh_host_reset_lock:
f4f051eb 1179 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1180 (ret == FAILED) ? "failed" : "succeded");
1da177e4 1181
f4f051eb 1182 return ret;
1183}
1da177e4
LT
1184
1185/*
1186* qla2x00_loop_reset
1187* Issue loop reset.
1188*
1189* Input:
1190* ha = adapter block pointer.
1191*
1192* Returns:
1193* 0 = success
1194*/
a4722cf2 1195int
e315cd28 1196qla2x00_loop_reset(scsi_qla_host_t *vha)
1da177e4 1197{
0c8c39af 1198 int ret;
bdf79621 1199 struct fc_port *fcport;
e315cd28 1200 struct qla_hw_data *ha = vha->hw;
1da177e4 1201
f4c496c1 1202 if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
55e5ed27
AV
1203 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1204 if (fcport->port_type != FCT_TARGET)
1205 continue;
1206
1207 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1208 if (ret != QLA_SUCCESS) {
1209 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1210 "target_reset=%d d_id=%x.\n", __func__,
1211 vha->host_no, ret, fcport->d_id.b24));
1212 }
1213 }
1214 }
1215
a9083016 1216 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
e315cd28 1217 ret = qla2x00_full_login_lip(vha);
0c8c39af 1218 if (ret != QLA_SUCCESS) {
749af3d5 1219 DEBUG2_3(printk("%s(%ld): failed: "
e315cd28 1220 "full_login_lip=%d.\n", __func__, vha->host_no,
0c8c39af 1221 ret));
749af3d5
AC
1222 }
1223 atomic_set(&vha->loop_state, LOOP_DOWN);
1224 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1225 qla2x00_mark_all_devices_lost(vha, 0);
1226 qla2x00_wait_for_loop_ready(vha);
0c8c39af
AV
1227 }
1228
0d6e61bc 1229 if (ha->flags.enable_lip_reset) {
e315cd28 1230 ret = qla2x00_lip_reset(vha);
0c8c39af 1231 if (ret != QLA_SUCCESS) {
749af3d5 1232 DEBUG2_3(printk("%s(%ld): failed: "
e315cd28
AC
1233 "lip_reset=%d.\n", __func__, vha->host_no, ret));
1234 } else
1235 qla2x00_wait_for_loop_ready(vha);
1da177e4
LT
1236 }
1237
1da177e4 1238 /* Issue marker command only when we are going to start the I/O */
e315cd28 1239 vha->marker_needed = 1;
1da177e4 1240
0c8c39af 1241 return QLA_SUCCESS;
1da177e4
LT
1242}
1243
df4bf0bb 1244void
e315cd28 1245qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
df4bf0bb 1246{
73208dfd 1247 int que, cnt;
df4bf0bb
AV
1248 unsigned long flags;
1249 srb_t *sp;
ac280b67 1250 struct srb_ctx *ctx;
e315cd28 1251 struct qla_hw_data *ha = vha->hw;
73208dfd 1252 struct req_que *req;
df4bf0bb
AV
1253
1254 spin_lock_irqsave(&ha->hardware_lock, flags);
2afa19a9 1255 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 1256 req = ha->req_q_map[que];
73208dfd
AC
1257 if (!req)
1258 continue;
1259 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1260 sp = req->outstanding_cmds[cnt];
e612d465 1261 if (sp) {
73208dfd 1262 req->outstanding_cmds[cnt] = NULL;
a9083016 1263 if (!sp->ctx ||
bad75002
AE
1264 (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
1265 IS_PROT_IO(sp)) {
ac280b67
AV
1266 sp->cmd->result = res;
1267 qla2x00_sp_compl(ha, sp);
1268 } else {
1269 ctx = sp->ctx;
6c452a45
AV
1270 if (ctx->type == SRB_LOGIN_CMD ||
1271 ctx->type == SRB_LOGOUT_CMD) {
4916392b 1272 ctx->u.iocb_cmd->free(sp);
db3ad7f8 1273 } else {
6c452a45 1274 struct fc_bsg_job *bsg_job =
4916392b 1275 ctx->u.bsg_job;
6c452a45
AV
1276 if (bsg_job->request->msgcode
1277 == FC_BSG_HST_CT)
db3ad7f8 1278 kfree(sp->fcport);
6c452a45
AV
1279 bsg_job->req->errors = 0;
1280 bsg_job->reply->result = res;
4916392b 1281 bsg_job->job_done(bsg_job);
db3ad7f8 1282 kfree(sp->ctx);
6c452a45 1283 mempool_free(sp,
4916392b 1284 ha->srb_mempool);
db3ad7f8 1285 }
ac280b67 1286 }
73208dfd 1287 }
df4bf0bb
AV
1288 }
1289 }
1290 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1291}
1292
f4f051eb 1293static int
1294qla2xxx_slave_alloc(struct scsi_device *sdev)
1da177e4 1295{
bdf79621 1296 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1da177e4 1297
19a7b4ae 1298 if (!rport || fc_remote_port_chkready(rport))
f4f051eb 1299 return -ENXIO;
bdf79621 1300
19a7b4ae 1301 sdev->hostdata = *(fc_port_t **)rport->dd_data;
1da177e4 1302
f4f051eb 1303 return 0;
1304}
1da177e4 1305
f4f051eb 1306static int
1307qla2xxx_slave_configure(struct scsi_device *sdev)
1308{
e315cd28
AC
1309 scsi_qla_host_t *vha = shost_priv(sdev->host);
1310 struct qla_hw_data *ha = vha->hw;
8482e118 1311 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2afa19a9 1312 struct req_que *req = vha->req;
8482e118 1313
f4f051eb 1314 if (sdev->tagged_supported)
73208dfd 1315 scsi_activate_tcq(sdev, req->max_q_depth);
f4f051eb 1316 else
73208dfd 1317 scsi_deactivate_tcq(sdev, req->max_q_depth);
1da177e4 1318
85821c90 1319 rport->dev_loss_tmo = ha->port_down_retry_count;
8482e118 1320
f4f051eb 1321 return 0;
1322}
1da177e4 1323
f4f051eb 1324static void
1325qla2xxx_slave_destroy(struct scsi_device *sdev)
1326{
1327 sdev->hostdata = NULL;
1da177e4
LT
1328}
1329
c45dd305
GM
1330static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1331{
1332 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1333
1334 if (!scsi_track_queue_full(sdev, qdepth))
1335 return;
1336
1337 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1338 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1339 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1340 sdev->queue_depth));
1341}
1342
1343static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1344{
1345 fc_port_t *fcport = sdev->hostdata;
1346 struct scsi_qla_host *vha = fcport->vha;
1347 struct qla_hw_data *ha = vha->hw;
1348 struct req_que *req = NULL;
1349
1350 req = vha->req;
1351 if (!req)
1352 return;
1353
1354 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1355 return;
1356
1357 if (sdev->ordered_tags)
1358 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1359 else
1360 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1361
1362 DEBUG2(qla_printk(KERN_INFO, ha,
1363 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1364 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1365 sdev->queue_depth));
1366}
1367
ce7e4af7 1368static int
e881a172 1369qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
ce7e4af7 1370{
c45dd305
GM
1371 switch (reason) {
1372 case SCSI_QDEPTH_DEFAULT:
1373 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1374 break;
1375 case SCSI_QDEPTH_QFULL:
1376 qla2x00_handle_queue_full(sdev, qdepth);
1377 break;
1378 case SCSI_QDEPTH_RAMP_UP:
1379 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1380 break;
1381 default:
08002af2 1382 return -EOPNOTSUPP;
c45dd305 1383 }
e881a172 1384
ce7e4af7
AV
1385 return sdev->queue_depth;
1386}
1387
1388static int
1389qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1390{
1391 if (sdev->tagged_supported) {
1392 scsi_set_tag_type(sdev, tag_type);
1393 if (tag_type)
1394 scsi_activate_tcq(sdev, sdev->queue_depth);
1395 else
1396 scsi_deactivate_tcq(sdev, sdev->queue_depth);
1397 } else
1398 tag_type = 0;
1399
1400 return tag_type;
1401}
1402
1da177e4
LT
1403/**
1404 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1405 * @ha: HA context
1406 *
1407 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
1408 * supported addressing method.
1409 */
1410static void
53303c42 1411qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1da177e4 1412{
7524f9b9 1413 /* Assume a 32bit DMA mask. */
1da177e4 1414 ha->flags.enable_64bit_addressing = 0;
1da177e4 1415
6a35528a 1416 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
7524f9b9
AV
1417 /* Any upper-dword bits set? */
1418 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
6a35528a 1419 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7524f9b9 1420 /* Ok, a 64bit DMA mask is applicable. */
1da177e4 1421 ha->flags.enable_64bit_addressing = 1;
fd34f556
AV
1422 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1423 ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
7524f9b9 1424 return;
1da177e4 1425 }
1da177e4 1426 }
7524f9b9 1427
284901a9
YH
1428 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1429 pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1da177e4
LT
1430}
1431
fd34f556 1432static void
e315cd28 1433qla2x00_enable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1434{
1435 unsigned long flags = 0;
1436 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1437
1438 spin_lock_irqsave(&ha->hardware_lock, flags);
1439 ha->interrupts_on = 1;
1440 /* enable risc and host interrupts */
1441 WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1442 RD_REG_WORD(&reg->ictrl);
1443 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1444
1445}
1446
1447static void
e315cd28 1448qla2x00_disable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1449{
1450 unsigned long flags = 0;
1451 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1452
1453 spin_lock_irqsave(&ha->hardware_lock, flags);
1454 ha->interrupts_on = 0;
1455 /* disable risc and host interrupts */
1456 WRT_REG_WORD(&reg->ictrl, 0);
1457 RD_REG_WORD(&reg->ictrl);
1458 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1459}
1460
1461static void
e315cd28 1462qla24xx_enable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1463{
1464 unsigned long flags = 0;
1465 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1466
1467 spin_lock_irqsave(&ha->hardware_lock, flags);
1468 ha->interrupts_on = 1;
1469 WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1470 RD_REG_DWORD(&reg->ictrl);
1471 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1472}
1473
1474static void
e315cd28 1475qla24xx_disable_intrs(struct qla_hw_data *ha)
fd34f556
AV
1476{
1477 unsigned long flags = 0;
1478 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1479
124f85e6
AV
1480 if (IS_NOPOLLING_TYPE(ha))
1481 return;
fd34f556
AV
1482 spin_lock_irqsave(&ha->hardware_lock, flags);
1483 ha->interrupts_on = 0;
1484 WRT_REG_DWORD(&reg->ictrl, 0);
1485 RD_REG_DWORD(&reg->ictrl);
1486 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1487}
1488
1489static struct isp_operations qla2100_isp_ops = {
1490 .pci_config = qla2100_pci_config,
1491 .reset_chip = qla2x00_reset_chip,
1492 .chip_diag = qla2x00_chip_diag,
1493 .config_rings = qla2x00_config_rings,
1494 .reset_adapter = qla2x00_reset_adapter,
1495 .nvram_config = qla2x00_nvram_config,
1496 .update_fw_options = qla2x00_update_fw_options,
1497 .load_risc = qla2x00_load_risc,
1498 .pci_info_str = qla2x00_pci_info_str,
1499 .fw_version_str = qla2x00_fw_version_str,
1500 .intr_handler = qla2100_intr_handler,
1501 .enable_intrs = qla2x00_enable_intrs,
1502 .disable_intrs = qla2x00_disable_intrs,
1503 .abort_command = qla2x00_abort_command,
523ec773
AV
1504 .target_reset = qla2x00_abort_target,
1505 .lun_reset = qla2x00_lun_reset,
fd34f556
AV
1506 .fabric_login = qla2x00_login_fabric,
1507 .fabric_logout = qla2x00_fabric_logout,
1508 .calc_req_entries = qla2x00_calc_iocbs_32,
1509 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1510 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1511 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1512 .read_nvram = qla2x00_read_nvram_data,
1513 .write_nvram = qla2x00_write_nvram_data,
1514 .fw_dump = qla2100_fw_dump,
1515 .beacon_on = NULL,
1516 .beacon_off = NULL,
1517 .beacon_blink = NULL,
1518 .read_optrom = qla2x00_read_optrom_data,
1519 .write_optrom = qla2x00_write_optrom_data,
1520 .get_flash_version = qla2x00_get_flash_version,
e315cd28 1521 .start_scsi = qla2x00_start_scsi,
a9083016 1522 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1523};
1524
1525static struct isp_operations qla2300_isp_ops = {
1526 .pci_config = qla2300_pci_config,
1527 .reset_chip = qla2x00_reset_chip,
1528 .chip_diag = qla2x00_chip_diag,
1529 .config_rings = qla2x00_config_rings,
1530 .reset_adapter = qla2x00_reset_adapter,
1531 .nvram_config = qla2x00_nvram_config,
1532 .update_fw_options = qla2x00_update_fw_options,
1533 .load_risc = qla2x00_load_risc,
1534 .pci_info_str = qla2x00_pci_info_str,
1535 .fw_version_str = qla2x00_fw_version_str,
1536 .intr_handler = qla2300_intr_handler,
1537 .enable_intrs = qla2x00_enable_intrs,
1538 .disable_intrs = qla2x00_disable_intrs,
1539 .abort_command = qla2x00_abort_command,
523ec773
AV
1540 .target_reset = qla2x00_abort_target,
1541 .lun_reset = qla2x00_lun_reset,
fd34f556
AV
1542 .fabric_login = qla2x00_login_fabric,
1543 .fabric_logout = qla2x00_fabric_logout,
1544 .calc_req_entries = qla2x00_calc_iocbs_32,
1545 .build_iocbs = qla2x00_build_scsi_iocbs_32,
1546 .prep_ms_iocb = qla2x00_prep_ms_iocb,
1547 .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
1548 .read_nvram = qla2x00_read_nvram_data,
1549 .write_nvram = qla2x00_write_nvram_data,
1550 .fw_dump = qla2300_fw_dump,
1551 .beacon_on = qla2x00_beacon_on,
1552 .beacon_off = qla2x00_beacon_off,
1553 .beacon_blink = qla2x00_beacon_blink,
1554 .read_optrom = qla2x00_read_optrom_data,
1555 .write_optrom = qla2x00_write_optrom_data,
1556 .get_flash_version = qla2x00_get_flash_version,
e315cd28 1557 .start_scsi = qla2x00_start_scsi,
a9083016 1558 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1559};
1560
1561static struct isp_operations qla24xx_isp_ops = {
1562 .pci_config = qla24xx_pci_config,
1563 .reset_chip = qla24xx_reset_chip,
1564 .chip_diag = qla24xx_chip_diag,
1565 .config_rings = qla24xx_config_rings,
1566 .reset_adapter = qla24xx_reset_adapter,
1567 .nvram_config = qla24xx_nvram_config,
1568 .update_fw_options = qla24xx_update_fw_options,
1569 .load_risc = qla24xx_load_risc,
1570 .pci_info_str = qla24xx_pci_info_str,
1571 .fw_version_str = qla24xx_fw_version_str,
1572 .intr_handler = qla24xx_intr_handler,
1573 .enable_intrs = qla24xx_enable_intrs,
1574 .disable_intrs = qla24xx_disable_intrs,
1575 .abort_command = qla24xx_abort_command,
523ec773
AV
1576 .target_reset = qla24xx_abort_target,
1577 .lun_reset = qla24xx_lun_reset,
fd34f556
AV
1578 .fabric_login = qla24xx_login_fabric,
1579 .fabric_logout = qla24xx_fabric_logout,
1580 .calc_req_entries = NULL,
1581 .build_iocbs = NULL,
1582 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1583 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1584 .read_nvram = qla24xx_read_nvram_data,
1585 .write_nvram = qla24xx_write_nvram_data,
1586 .fw_dump = qla24xx_fw_dump,
1587 .beacon_on = qla24xx_beacon_on,
1588 .beacon_off = qla24xx_beacon_off,
1589 .beacon_blink = qla24xx_beacon_blink,
1590 .read_optrom = qla24xx_read_optrom_data,
1591 .write_optrom = qla24xx_write_optrom_data,
1592 .get_flash_version = qla24xx_get_flash_version,
e315cd28 1593 .start_scsi = qla24xx_start_scsi,
a9083016 1594 .abort_isp = qla2x00_abort_isp,
fd34f556
AV
1595};
1596
c3a2f0df
AV
1597static struct isp_operations qla25xx_isp_ops = {
1598 .pci_config = qla25xx_pci_config,
1599 .reset_chip = qla24xx_reset_chip,
1600 .chip_diag = qla24xx_chip_diag,
1601 .config_rings = qla24xx_config_rings,
1602 .reset_adapter = qla24xx_reset_adapter,
1603 .nvram_config = qla24xx_nvram_config,
1604 .update_fw_options = qla24xx_update_fw_options,
1605 .load_risc = qla24xx_load_risc,
1606 .pci_info_str = qla24xx_pci_info_str,
1607 .fw_version_str = qla24xx_fw_version_str,
1608 .intr_handler = qla24xx_intr_handler,
1609 .enable_intrs = qla24xx_enable_intrs,
1610 .disable_intrs = qla24xx_disable_intrs,
1611 .abort_command = qla24xx_abort_command,
523ec773
AV
1612 .target_reset = qla24xx_abort_target,
1613 .lun_reset = qla24xx_lun_reset,
c3a2f0df
AV
1614 .fabric_login = qla24xx_login_fabric,
1615 .fabric_logout = qla24xx_fabric_logout,
1616 .calc_req_entries = NULL,
1617 .build_iocbs = NULL,
1618 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1619 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1620 .read_nvram = qla25xx_read_nvram_data,
1621 .write_nvram = qla25xx_write_nvram_data,
1622 .fw_dump = qla25xx_fw_dump,
1623 .beacon_on = qla24xx_beacon_on,
1624 .beacon_off = qla24xx_beacon_off,
1625 .beacon_blink = qla24xx_beacon_blink,
338c9161 1626 .read_optrom = qla25xx_read_optrom_data,
c3a2f0df
AV
1627 .write_optrom = qla24xx_write_optrom_data,
1628 .get_flash_version = qla24xx_get_flash_version,
bad75002 1629 .start_scsi = qla24xx_dif_start_scsi,
a9083016 1630 .abort_isp = qla2x00_abort_isp,
c3a2f0df
AV
1631};
1632
3a03eb79
AV
1633static struct isp_operations qla81xx_isp_ops = {
1634 .pci_config = qla25xx_pci_config,
1635 .reset_chip = qla24xx_reset_chip,
1636 .chip_diag = qla24xx_chip_diag,
1637 .config_rings = qla24xx_config_rings,
1638 .reset_adapter = qla24xx_reset_adapter,
1639 .nvram_config = qla81xx_nvram_config,
1640 .update_fw_options = qla81xx_update_fw_options,
eaac30be 1641 .load_risc = qla81xx_load_risc,
3a03eb79
AV
1642 .pci_info_str = qla24xx_pci_info_str,
1643 .fw_version_str = qla24xx_fw_version_str,
1644 .intr_handler = qla24xx_intr_handler,
1645 .enable_intrs = qla24xx_enable_intrs,
1646 .disable_intrs = qla24xx_disable_intrs,
1647 .abort_command = qla24xx_abort_command,
1648 .target_reset = qla24xx_abort_target,
1649 .lun_reset = qla24xx_lun_reset,
1650 .fabric_login = qla24xx_login_fabric,
1651 .fabric_logout = qla24xx_fabric_logout,
1652 .calc_req_entries = NULL,
1653 .build_iocbs = NULL,
1654 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1655 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
3d79038f
AV
1656 .read_nvram = NULL,
1657 .write_nvram = NULL,
3a03eb79
AV
1658 .fw_dump = qla81xx_fw_dump,
1659 .beacon_on = qla24xx_beacon_on,
1660 .beacon_off = qla24xx_beacon_off,
1661 .beacon_blink = qla24xx_beacon_blink,
1662 .read_optrom = qla25xx_read_optrom_data,
1663 .write_optrom = qla24xx_write_optrom_data,
1664 .get_flash_version = qla24xx_get_flash_version,
1665 .start_scsi = qla24xx_start_scsi,
a9083016
GM
1666 .abort_isp = qla2x00_abort_isp,
1667};
1668
1669static struct isp_operations qla82xx_isp_ops = {
1670 .pci_config = qla82xx_pci_config,
1671 .reset_chip = qla82xx_reset_chip,
1672 .chip_diag = qla24xx_chip_diag,
1673 .config_rings = qla82xx_config_rings,
1674 .reset_adapter = qla24xx_reset_adapter,
1675 .nvram_config = qla81xx_nvram_config,
1676 .update_fw_options = qla24xx_update_fw_options,
1677 .load_risc = qla82xx_load_risc,
1678 .pci_info_str = qla82xx_pci_info_str,
1679 .fw_version_str = qla24xx_fw_version_str,
1680 .intr_handler = qla82xx_intr_handler,
1681 .enable_intrs = qla82xx_enable_intrs,
1682 .disable_intrs = qla82xx_disable_intrs,
1683 .abort_command = qla24xx_abort_command,
1684 .target_reset = qla24xx_abort_target,
1685 .lun_reset = qla24xx_lun_reset,
1686 .fabric_login = qla24xx_login_fabric,
1687 .fabric_logout = qla24xx_fabric_logout,
1688 .calc_req_entries = NULL,
1689 .build_iocbs = NULL,
1690 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1691 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1692 .read_nvram = qla24xx_read_nvram_data,
1693 .write_nvram = qla24xx_write_nvram_data,
1694 .fw_dump = qla24xx_fw_dump,
1695 .beacon_on = qla24xx_beacon_on,
1696 .beacon_off = qla24xx_beacon_off,
1697 .beacon_blink = qla24xx_beacon_blink,
1698 .read_optrom = qla82xx_read_optrom_data,
1699 .write_optrom = qla82xx_write_optrom_data,
1700 .get_flash_version = qla24xx_get_flash_version,
1701 .start_scsi = qla82xx_start_scsi,
1702 .abort_isp = qla82xx_abort_isp,
3a03eb79
AV
1703};
1704
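/*
 * Added note (derived from the code in this file): the qlaNNxx_isp_ops
 * tables above bind chip-specific entry points; qla2x00_probe_one() selects
 * one of them into ha->isp_ops.  qla2x00_set_isp_flags() below decodes the
 * PCI device ID into ha->device_type feature bits (DT_FWI2, DT_IIDMA,
 * DT_ZIO_SUPPORTED, ...), picks the firmware load address, and derives the
 * physical port number (from ha->portnum on ISP82xx, otherwise from the
 * PCI interrupt-pin register) to set ha->flags.port0.
 */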
ea5b6382 1705static inline void
e315cd28 1706qla2x00_set_isp_flags(struct qla_hw_data *ha)
ea5b6382 1707{
1708 ha->device_type = DT_EXTENDED_IDS;
1709 switch (ha->pdev->device) {
1710 case PCI_DEVICE_ID_QLOGIC_ISP2100:
1711 ha->device_type |= DT_ISP2100;
1712 ha->device_type &= ~DT_EXTENDED_IDS;
441d1072 1713 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
ea5b6382 1714 break;
1715 case PCI_DEVICE_ID_QLOGIC_ISP2200:
1716 ha->device_type |= DT_ISP2200;
1717 ha->device_type &= ~DT_EXTENDED_IDS;
441d1072 1718 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
ea5b6382 1719 break;
1720 case PCI_DEVICE_ID_QLOGIC_ISP2300:
1721 ha->device_type |= DT_ISP2300;
4a59f71d 1722 ha->device_type |= DT_ZIO_SUPPORTED;
441d1072 1723 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382 1724 break;
1725 case PCI_DEVICE_ID_QLOGIC_ISP2312:
1726 ha->device_type |= DT_ISP2312;
4a59f71d 1727 ha->device_type |= DT_ZIO_SUPPORTED;
441d1072 1728 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382 1729 break;
1730 case PCI_DEVICE_ID_QLOGIC_ISP2322:
1731 ha->device_type |= DT_ISP2322;
4a59f71d 1732 ha->device_type |= DT_ZIO_SUPPORTED;
ea5b6382 1733 if (ha->pdev->subsystem_vendor == 0x1028 &&
1734 ha->pdev->subsystem_device == 0x0170)
1735 ha->device_type |= DT_OEM_001;
441d1072 1736 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382 1737 break;
1738 case PCI_DEVICE_ID_QLOGIC_ISP6312:
1739 ha->device_type |= DT_ISP6312;
441d1072 1740 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382 1741 break;
1742 case PCI_DEVICE_ID_QLOGIC_ISP6322:
1743 ha->device_type |= DT_ISP6322;
441d1072 1744 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
ea5b6382 1745 break;
1746 case PCI_DEVICE_ID_QLOGIC_ISP2422:
1747 ha->device_type |= DT_ISP2422;
4a59f71d 1748 ha->device_type |= DT_ZIO_SUPPORTED;
e428924c 1749 ha->device_type |= DT_FWI2;
c76f2c01 1750 ha->device_type |= DT_IIDMA;
441d1072 1751 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1752 break;
1753 case PCI_DEVICE_ID_QLOGIC_ISP2432:
1754 ha->device_type |= DT_ISP2432;
4a59f71d 1755 ha->device_type |= DT_ZIO_SUPPORTED;
e428924c 1756 ha->device_type |= DT_FWI2;
c76f2c01 1757 ha->device_type |= DT_IIDMA;
441d1072 1758 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1759 break;
4d4df193
HK
1760 case PCI_DEVICE_ID_QLOGIC_ISP8432:
1761 ha->device_type |= DT_ISP8432;
1762 ha->device_type |= DT_ZIO_SUPPORTED;
1763 ha->device_type |= DT_FWI2;
1764 ha->device_type |= DT_IIDMA;
1765 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1766 break;
044cc6c8 1767 case PCI_DEVICE_ID_QLOGIC_ISP5422:
1768 ha->device_type |= DT_ISP5422;
e428924c 1769 ha->device_type |= DT_FWI2;
441d1072 1770 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1771 break;
044cc6c8 1772 case PCI_DEVICE_ID_QLOGIC_ISP5432:
1773 ha->device_type |= DT_ISP5432;
e428924c 1774 ha->device_type |= DT_FWI2;
441d1072 1775 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1776 break;
c3a2f0df
AV
1777 case PCI_DEVICE_ID_QLOGIC_ISP2532:
1778 ha->device_type |= DT_ISP2532;
1779 ha->device_type |= DT_ZIO_SUPPORTED;
1780 ha->device_type |= DT_FWI2;
1781 ha->device_type |= DT_IIDMA;
441d1072 1782 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
ea5b6382 1783 break;
3a03eb79
AV
1784 case PCI_DEVICE_ID_QLOGIC_ISP8001:
1785 ha->device_type |= DT_ISP8001;
1786 ha->device_type |= DT_ZIO_SUPPORTED;
1787 ha->device_type |= DT_FWI2;
1788 ha->device_type |= DT_IIDMA;
1789 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1790 break;
a9083016
GM
1791 case PCI_DEVICE_ID_QLOGIC_ISP8021:
1792 ha->device_type |= DT_ISP8021;
1793 ha->device_type |= DT_ZIO_SUPPORTED;
1794 ha->device_type |= DT_FWI2;
1795 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1796 /* Initialize 82XX ISP flags */
1797 qla82xx_init_flags(ha);
1798 break;
ea5b6382 1799 }
e5b68a61 1800
a9083016
GM
1801 if (IS_QLA82XX(ha))
1802 ha->port_no = !(ha->portnum & 1);
1803 else
1804 /* Get adapter physical port no from interrupt pin register. */
1805 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1806
e5b68a61
AC
1807 if (ha->port_no & 1)
1808 ha->flags.port0 = 1;
1809 else
1810 ha->flags.port0 = 0;
ea5b6382 1811}
1812
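/*
 * Added note (summary of the function below): qla2x00_iospace_config()
 * reserves the selected PIO/MMIO BARs, keeps a PIO address only where it is
 * still needed (ISP2312 flash access), and ioremaps BAR 1 for register
 * access.  On ISP25xx/81xx with the multiqueue module options it also maps
 * BAR 3 (mqiobase) and reads the MSI-X control word to bound
 * max_req_queues/max_rsp_queues; ISP82xx parts are handled entirely by
 * qla82xx_iospace_config().
 */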
1da177e4 1813static int
e315cd28 1814qla2x00_iospace_config(struct qla_hw_data *ha)
1da177e4 1815{
3776541d 1816 resource_size_t pio;
73208dfd 1817 uint16_t msix;
68ca949c 1818 int cpus;
1da177e4 1819
a9083016
GM
1820 if (IS_QLA82XX(ha))
1821 return qla82xx_iospace_config(ha);
1822
285d0321
AV
1823 if (pci_request_selected_regions(ha->pdev, ha->bars,
1824 QLA2XXX_DRIVER_NAME)) {
1825 qla_printk(KERN_WARNING, ha,
1826 "Failed to reserve PIO/MMIO regions (%s)\n",
1827 pci_name(ha->pdev));
1828
1829 goto iospace_error_exit;
1830 }
1831 if (!(ha->bars & 1))
1832 goto skip_pio;
1833
1da177e4
LT
1834 /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1835 pio = pci_resource_start(ha->pdev, 0);
3776541d
AV
1836 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1837 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1da177e4
LT
1838 qla_printk(KERN_WARNING, ha,
1839 "Invalid PCI I/O region size (%s)...\n",
1840 pci_name(ha->pdev));
1841 pio = 0;
1842 }
1843 } else {
1844 qla_printk(KERN_WARNING, ha,
1845 "region #0 not a PIO resource (%s)...\n",
1846 pci_name(ha->pdev));
1847 pio = 0;
1848 }
285d0321 1849 ha->pio_address = pio;
1da177e4 1850
285d0321 1851skip_pio:
1da177e4 1852 /* Use MMIO operations for all accesses. */
3776541d 1853 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1da177e4 1854 qla_printk(KERN_ERR, ha,
3776541d 1855 "region #1 not an MMIO resource (%s), aborting\n",
1da177e4
LT
1856 pci_name(ha->pdev));
1857 goto iospace_error_exit;
1858 }
3776541d 1859 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1da177e4
LT
1860 qla_printk(KERN_ERR, ha,
1861 "Invalid PCI mem region size (%s), aborting\n",
1862 pci_name(ha->pdev));
1863 goto iospace_error_exit;
1864 }
1865
3776541d 1866 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1da177e4
LT
1867 if (!ha->iobase) {
1868 qla_printk(KERN_ERR, ha,
1869 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1870
1871 goto iospace_error_exit;
1872 }
1873
73208dfd 1874 /* Determine queue resources */
2afa19a9 1875 ha->max_req_queues = ha->max_rsp_queues = 1;
d84a47c2
MH
1876 if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1877 (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
2afa19a9 1878 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
17d98630 1879 goto mqiobase_exit;
d84a47c2 1880
17d98630
AC
1881 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1882 pci_resource_len(ha->pdev, 3));
1883 if (ha->mqiobase) {
1884 /* Read MSIX vector size of the board */
1885 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1886 ha->msix_count = msix;
68ca949c
AC
1887 /* Max queues are bounded by available msix vectors */
1888 /* queue 0 uses two msix vectors */
1889 if (ql2xmultique_tag) {
1890 cpus = num_online_cpus();
27dc9c5a 1891 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
68ca949c
AC
1892 (cpus + 1) : (ha->msix_count - 1);
1893 ha->max_req_queues = 2;
1894 } else if (ql2xmaxqueues > 1) {
2afa19a9
AC
1895 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1896 QLA_MQ_SIZE : ql2xmaxqueues;
1897 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1898 " of request queues:%d\n", ha->max_req_queues));
1899 }
68ca949c
AC
1900 qla_printk(KERN_INFO, ha,
1901 "MSI-X vector count: %d\n", msix);
2afa19a9
AC
1902 } else
1903 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
17d98630
AC
1904
1905mqiobase_exit:
2afa19a9 1906 ha->msix_count = ha->max_rsp_queues + 1;
1da177e4
LT
1907 return (0);
1908
1909iospace_error_exit:
1910 return (-ENOMEM);
1911}
1912
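/*
 * Added note: the two callbacks below implement the midlayer's asynchronous
 * scan.  qla2xxx_scan_start() queues loop/RSCN/NPIV update work for the DPC
 * thread (skipped when running the "gold" recovery firmware), and
 * qla2xxx_scan_finished() reports completion once the loop reaches
 * LOOP_READY or loop_reset_delay seconds have elapsed.
 */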
1e99e33a
AV
1913static void
1914qla2xxx_scan_start(struct Scsi_Host *shost)
1915{
e315cd28 1916 scsi_qla_host_t *vha = shost_priv(shost);
1e99e33a 1917
cbc8eb67
AV
1918 if (vha->hw->flags.running_gold_fw)
1919 return;
1920
e315cd28
AC
1921 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1922 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1923 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1924 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1e99e33a
AV
1925}
1926
1927static int
1928qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1929{
e315cd28 1930 scsi_qla_host_t *vha = shost_priv(shost);
1e99e33a 1931
e315cd28 1932 if (!vha->host)
1e99e33a 1933 return 1;
e315cd28 1934 if (time > vha->hw->loop_reset_delay * HZ)
1e99e33a
AV
1935 return 1;
1936
e315cd28 1937 return atomic_read(&vha->loop_state) == LOOP_READY;
1e99e33a
AV
1938}
1939
1da177e4
LT
1940/*
1941 * PCI driver interface
1942 */
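/*
 * Added overview (editorial, derived from the probe/remove code below):
 * qla2x00_probe_one() enables the PCI device, allocates the shared
 * qla_hw_data, sets the ISP type flags, maps I/O space, selects per-ISP ring
 * sizes and isp_ops, allocates adapter memory and the base request/response
 * queues, creates the base scsi_qla_host, requests IRQs, initializes the
 * adapter, starts the DPC thread and the one-second timer, and finally
 * registers the host with the SCSI midlayer and kicks off scanning.
 */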
7ee61397
AV
1943static int __devinit
1944qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1da177e4 1945{
a1541d5a 1946 int ret = -ENODEV;
1da177e4 1947 struct Scsi_Host *host;
e315cd28
AC
1948 scsi_qla_host_t *base_vha = NULL;
1949 struct qla_hw_data *ha;
29856e28 1950 char pci_info[30];
1da177e4 1951 char fw_str[30];
5433383e 1952 struct scsi_host_template *sht;
c51da4ec 1953 int bars, max_id, mem_only = 0;
e315cd28 1954 uint16_t req_length = 0, rsp_length = 0;
73208dfd
AC
1955 struct req_que *req = NULL;
1956 struct rsp_que *rsp = NULL;
1da177e4 1957
285d0321 1958 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
a5326f86 1959 sht = &qla2xxx_driver_template;
5433383e 1960 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
8bc69e7d 1961 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
4d4df193 1962 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
8bc69e7d 1963 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
c3a2f0df 1964 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
3a03eb79 1965 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
a9083016
GM
1966 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
1967 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
285d0321 1968 bars = pci_select_bars(pdev, IORESOURCE_MEM);
09483916 1969 mem_only = 1;
285d0321
AV
1970 }
1971
09483916
BH
1972 if (mem_only) {
1973 if (pci_enable_device_mem(pdev))
1974 goto probe_out;
1975 } else {
1976 if (pci_enable_device(pdev))
1977 goto probe_out;
1978 }
285d0321 1979
0927678f
JB
1980 /* This may fail but that's ok */
1981 pci_enable_pcie_error_reporting(pdev);
285d0321 1982
e315cd28
AC
1983 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1984 if (!ha) {
1985 DEBUG(printk("Unable to allocate memory for ha\n"));
1986 goto probe_out;
1da177e4 1987 }
e315cd28 1988 ha->pdev = pdev;
1da177e4
LT
1989
1990 /* Clear our data area */
285d0321 1991 ha->bars = bars;
09483916 1992 ha->mem_only = mem_only;
df4bf0bb 1993 spin_lock_init(&ha->hardware_lock);
1da177e4 1994
ea5b6382 1995 /* Set ISP-type information. */
1996 qla2x00_set_isp_flags(ha);
ca79cf66
DG
1997
1998 /* Set EEH reset type to fundamental if required by hba */
 1999 	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
2000 pdev->needs_freset = 1;
ca79cf66
DG
2001 }
2002
1da177e4
LT
2003 /* Configure PCI I/O space */
2004 ret = qla2x00_iospace_config(ha);
a1541d5a 2005 if (ret)
e315cd28 2006 goto probe_hw_failed;
1da177e4 2007
1da177e4 2008 qla_printk(KERN_INFO, ha,
5433383e
AV
2009 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
2010 ha->iobase);
1da177e4 2011
1da177e4 2012 ha->prev_topology = 0;
fca29703 2013 ha->init_cb_size = sizeof(init_cb_t);
d8b45213 2014 ha->link_data_rate = PORT_SPEED_UNKNOWN;
854165f4 2015 ha->optrom_size = OPTROM_SIZE_2300;
1da177e4 2016
abbd8870 2017 /* Assign ISP specific operations. */
e315cd28 2018 max_id = MAX_TARGETS_2200;
1da177e4 2019 if (IS_QLA2100(ha)) {
e315cd28 2020 max_id = MAX_TARGETS_2100;
1da177e4 2021 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
e315cd28
AC
2022 req_length = REQUEST_ENTRY_CNT_2100;
2023 rsp_length = RESPONSE_ENTRY_CNT_2100;
2024 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
abbd8870 2025 ha->gid_list_info_size = 4;
3a03eb79
AV
2026 ha->flash_conf_off = ~0;
2027 ha->flash_data_off = ~0;
2028 ha->nvram_conf_off = ~0;
2029 ha->nvram_data_off = ~0;
fd34f556 2030 ha->isp_ops = &qla2100_isp_ops;
1da177e4 2031 } else if (IS_QLA2200(ha)) {
1da177e4 2032 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2033 req_length = REQUEST_ENTRY_CNT_2200;
2034 rsp_length = RESPONSE_ENTRY_CNT_2100;
2035 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
abbd8870 2036 ha->gid_list_info_size = 4;
3a03eb79
AV
2037 ha->flash_conf_off = ~0;
2038 ha->flash_data_off = ~0;
2039 ha->nvram_conf_off = ~0;
2040 ha->nvram_data_off = ~0;
fd34f556 2041 ha->isp_ops = &qla2100_isp_ops;
fca29703 2042 } else if (IS_QLA23XX(ha)) {
1da177e4 2043 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2044 req_length = REQUEST_ENTRY_CNT_2200;
2045 rsp_length = RESPONSE_ENTRY_CNT_2300;
2046 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
abbd8870 2047 ha->gid_list_info_size = 6;
854165f4 2048 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2049 ha->optrom_size = OPTROM_SIZE_2322;
3a03eb79
AV
2050 ha->flash_conf_off = ~0;
2051 ha->flash_data_off = ~0;
2052 ha->nvram_conf_off = ~0;
2053 ha->nvram_data_off = ~0;
fd34f556 2054 ha->isp_ops = &qla2300_isp_ops;
4d4df193 2055 } else if (IS_QLA24XX_TYPE(ha)) {
fca29703 2056 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2057 req_length = REQUEST_ENTRY_CNT_24XX;
2058 rsp_length = RESPONSE_ENTRY_CNT_2300;
2059 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2c3dfe3f 2060 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
fca29703 2061 ha->gid_list_info_size = 8;
854165f4 2062 ha->optrom_size = OPTROM_SIZE_24XX;
73208dfd 2063 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
fd34f556 2064 ha->isp_ops = &qla24xx_isp_ops;
3a03eb79
AV
2065 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2066 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2067 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2068 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
c3a2f0df 2069 } else if (IS_QLA25XX(ha)) {
c3a2f0df 2070 ha->mbx_count = MAILBOX_REGISTER_COUNT;
e315cd28
AC
2071 req_length = REQUEST_ENTRY_CNT_24XX;
2072 rsp_length = RESPONSE_ENTRY_CNT_2300;
2073 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
c3a2f0df 2074 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
c3a2f0df
AV
2075 ha->gid_list_info_size = 8;
2076 ha->optrom_size = OPTROM_SIZE_25XX;
73208dfd 2077 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
c3a2f0df 2078 ha->isp_ops = &qla25xx_isp_ops;
3a03eb79
AV
2079 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2080 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2081 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2082 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2083 } else if (IS_QLA81XX(ha)) {
2084 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2085 req_length = REQUEST_ENTRY_CNT_24XX;
2086 rsp_length = RESPONSE_ENTRY_CNT_2300;
2087 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2088 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2089 ha->gid_list_info_size = 8;
2090 ha->optrom_size = OPTROM_SIZE_81XX;
40859ae5 2091 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3a03eb79
AV
2092 ha->isp_ops = &qla81xx_isp_ops;
2093 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2094 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2095 ha->nvram_conf_off = ~0;
2096 ha->nvram_data_off = ~0;
a9083016
GM
2097 } else if (IS_QLA82XX(ha)) {
2098 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2099 req_length = REQUEST_ENTRY_CNT_82XX;
2100 rsp_length = RESPONSE_ENTRY_CNT_82XX;
2101 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2102 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2103 ha->gid_list_info_size = 8;
2104 ha->optrom_size = OPTROM_SIZE_82XX;
2105 ha->isp_ops = &qla82xx_isp_ops;
2106 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2107 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2108 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2109 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1da177e4 2110 }
1da177e4 2111
6c2f527c 2112 mutex_init(&ha->vport_lock);
0b05a1f0
MB
2113 init_completion(&ha->mbx_cmd_comp);
2114 complete(&ha->mbx_cmd_comp);
2115 init_completion(&ha->mbx_intr_comp);
1da177e4 2116
2c3dfe3f 2117 set_bit(0, (unsigned long *) ha->vp_idx_map);
1da177e4 2118
53303c42 2119 qla2x00_config_dma_addressing(ha);
73208dfd 2120 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
e315cd28 2121 	if (ret < 0) {
1da177e4
LT
2122 qla_printk(KERN_WARNING, ha,
2123 "[ERROR] Failed to allocate memory for adapter\n");
2124
e315cd28
AC
2125 goto probe_hw_failed;
2126 }
2127
73208dfd 2128 req->max_q_depth = MAX_Q_DEPTH;
e315cd28 2129 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
73208dfd
AC
2130 req->max_q_depth = ql2xmaxqdepth;
2131
e315cd28
AC
2132
2133 base_vha = qla2x00_create_host(sht, ha);
2134 if (!base_vha) {
2135 qla_printk(KERN_WARNING, ha,
2136 "[ERROR] Failed to allocate memory for scsi_host\n");
2137
a1541d5a 2138 ret = -ENOMEM;
6e9f21f3 2139 qla2x00_mem_free(ha);
2afa19a9
AC
2140 qla2x00_free_req_que(ha, req);
2141 qla2x00_free_rsp_que(ha, rsp);
e315cd28 2142 goto probe_hw_failed;
1da177e4
LT
2143 }
2144
e315cd28
AC
2145 pci_set_drvdata(pdev, base_vha);
2146
e315cd28 2147 host = base_vha->host;
2afa19a9 2148 base_vha->req = req;
73208dfd
AC
2149 host->can_queue = req->length + 128;
2150 if (IS_QLA2XXX_MIDTYPE(ha))
e315cd28 2151 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
73208dfd 2152 else
e315cd28
AC
2153 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
2154 base_vha->vp_idx;
e315cd28
AC
2155 if (IS_QLA2100(ha))
2156 host->sg_tablesize = 32;
2157 host->max_id = max_id;
2158 host->this_id = 255;
2159 host->cmd_per_lun = 3;
2160 host->unique_id = host->host_no;
2161 host->max_cmd_len = MAX_CMDSZ;
2162 host->max_channel = MAX_BUSES - 1;
2163 host->max_lun = MAX_LUNS;
2164 host->transportt = qla2xxx_transport_template;
9a069e19 2165 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
e315cd28 2166
73208dfd
AC
2167 /* Set up the irqs */
2168 ret = qla2x00_request_irqs(ha, rsp);
2169 if (ret)
6e9f21f3 2170 goto probe_init_failed;
90a86fc0
JC
2171
2172 pci_save_state(pdev);
2173
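	/*
	 * Added note: the multiqueue setup path later in this function jumps
	 * back to the que_init label below if qla25xx_setup_mode() fails, so
	 * the queue-pointer arrays are reallocated for (per the warning
	 * message) single-queue operation.
	 */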
73208dfd 2174 /* Alloc arrays of request and response ring ptrs */
7163ea81 2175que_init:
73208dfd
AC
2176 if (!qla2x00_alloc_queues(ha)) {
2177 qla_printk(KERN_WARNING, ha,
2178 "[ERROR] Failed to allocate memory for queue"
2179 " pointers\n");
6e9f21f3 2180 goto probe_init_failed;
73208dfd 2181 }
a9083016 2182
73208dfd
AC
2183 ha->rsp_q_map[0] = rsp;
2184 ha->req_q_map[0] = req;
2afa19a9
AC
2185 rsp->req = req;
2186 req->rsp = rsp;
2187 set_bit(0, ha->req_qid_map);
2188 set_bit(0, ha->rsp_qid_map);
08029990
AV
2189 /* FWI2-capable only. */
2190 req->req_q_in = &ha->iobase->isp24.req_q_in;
2191 req->req_q_out = &ha->iobase->isp24.req_q_out;
2192 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2193 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
17d98630 2194 if (ha->mqenable) {
08029990
AV
2195 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2196 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2197 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
2198 rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
17d98630
AC
2199 }
2200
a9083016
GM
2201 if (IS_QLA82XX(ha)) {
2202 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2203 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2204 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2205 }
2206
e315cd28 2207 if (qla2x00_initialize_adapter(base_vha)) {
1da177e4
LT
2208 qla_printk(KERN_WARNING, ha,
2209 "Failed to initialize adapter\n");
2210
2211 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
2212 "Adapter flags %x.\n",
e315cd28 2213 base_vha->host_no, base_vha->device_flags));
1da177e4 2214
a9083016
GM
2215 if (IS_QLA82XX(ha)) {
2216 qla82xx_idc_lock(ha);
2217 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2218 QLA82XX_DEV_FAILED);
2219 qla82xx_idc_unlock(ha);
2220 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
2221 }
2222
a1541d5a 2223 ret = -ENODEV;
1da177e4
LT
2224 goto probe_failed;
2225 }
2226
7163ea81
AC
2227 if (ha->mqenable) {
2228 if (qla25xx_setup_mode(base_vha)) {
68ca949c
AC
2229 qla_printk(KERN_WARNING, ha,
2230 "Can't create queues, falling back to single"
2231 " queue mode\n");
7163ea81
AC
2232 goto que_init;
2233 }
2234 }
68ca949c 2235
cbc8eb67
AV
2236 if (ha->flags.running_gold_fw)
2237 goto skip_dpc;
2238
1da177e4
LT
2239 /*
2240 * Startup the kernel thread for this host adapter
2241 */
39a11240 2242 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
e315cd28 2243 "%s_dpc", base_vha->host_str);
39a11240 2244 if (IS_ERR(ha->dpc_thread)) {
1da177e4
LT
2245 qla_printk(KERN_WARNING, ha,
2246 "Unable to start DPC thread!\n");
39a11240 2247 ret = PTR_ERR(ha->dpc_thread);
1da177e4
LT
2248 goto probe_failed;
2249 }
1da177e4 2250
cbc8eb67 2251skip_dpc:
e315cd28
AC
2252 list_add_tail(&base_vha->list, &ha->vp_list);
2253 base_vha->host->irq = ha->pdev->irq;
1da177e4
LT
2254
 2255 	/* Initialize the timer */
e315cd28 2256 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1da177e4
LT
2257
2258 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
e315cd28 2259 base_vha->host_no, ha));
d19044c3 2260
bad75002
AE
2261 if (IS_QLA25XX(ha) && ql2xenabledif) {
2262 if (ha->fw_attributes & BIT_4) {
2263 base_vha->flags.difdix_supported = 1;
2264 DEBUG18(qla_printk(KERN_INFO, ha,
2265 "Registering for DIF/DIX type 1 and 3"
2266 " protection.\n"));
2267 scsi_host_set_prot(host,
2268 SHOST_DIF_TYPE1_PROTECTION
2269 | SHOST_DIF_TYPE3_PROTECTION
2270 | SHOST_DIX_TYPE1_PROTECTION
2271 | SHOST_DIX_TYPE3_PROTECTION);
2272 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
2273 } else
2274 base_vha->flags.difdix_supported = 0;
2275 }
2276
a9083016
GM
2277 ha->isp_ops->enable_intrs(ha);
2278
a1541d5a
AV
2279 ret = scsi_add_host(host, &pdev->dev);
2280 if (ret)
2281 goto probe_failed;
2282
1486400f
MR
2283 base_vha->flags.init_done = 1;
2284 base_vha->flags.online = 1;
2285
1e99e33a
AV
2286 scsi_scan_host(host);
2287
e315cd28 2288 qla2x00_alloc_sysfs_attr(base_vha);
a1541d5a 2289
e315cd28 2290 qla2x00_init_host_attr(base_vha);
a1541d5a 2291
e315cd28 2292 qla2x00_dfs_setup(base_vha);
df613b96 2293
1da177e4
LT
2294 qla_printk(KERN_INFO, ha, "\n"
2295 " QLogic Fibre Channel HBA Driver: %s\n"
2296 " QLogic %s - %s\n"
5433383e
AV
2297 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2298 qla2x00_version_str, ha->model_number,
e315cd28
AC
2299 ha->model_desc ? ha->model_desc : "", pdev->device,
2300 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2301 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2302 ha->isp_ops->fw_version_str(base_vha, fw_str));
1da177e4 2303
1da177e4
LT
2304 return 0;
2305
6e9f21f3 2306probe_init_failed:
2afa19a9
AC
2307 qla2x00_free_req_que(ha, req);
2308 qla2x00_free_rsp_que(ha, rsp);
2309 ha->max_req_queues = ha->max_rsp_queues = 0;
6e9f21f3 2310
1da177e4 2311probe_failed:
b9978769
AV
2312 if (base_vha->timer_active)
2313 qla2x00_stop_timer(base_vha);
2314 base_vha->flags.online = 0;
2315 if (ha->dpc_thread) {
2316 struct task_struct *t = ha->dpc_thread;
2317
2318 ha->dpc_thread = NULL;
2319 kthread_stop(t);
2320 }
2321
e315cd28 2322 qla2x00_free_device(base_vha);
1da177e4 2323
e315cd28 2324 scsi_host_put(base_vha->host);
1da177e4 2325
e315cd28 2326probe_hw_failed:
a9083016
GM
2327 if (IS_QLA82XX(ha)) {
2328 qla82xx_idc_lock(ha);
2329 qla82xx_clear_drv_active(ha);
2330 qla82xx_idc_unlock(ha);
2331 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2332 if (!ql2xdbwr)
2333 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2334 } else {
2335 if (ha->iobase)
2336 iounmap(ha->iobase);
2337 }
e315cd28
AC
2338 pci_release_selected_regions(ha->pdev, ha->bars);
2339 kfree(ha);
2340 ha = NULL;
1da177e4 2341
a1541d5a 2342probe_out:
e315cd28 2343 pci_disable_device(pdev);
a1541d5a 2344 return ret;
1da177e4 2345}
1da177e4 2346
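/*
 * Added note (teardown order as read from qla2x00_remove_one() below):
 * terminate any NPIV vports, mark the base port UNLOADING, abort outstanding
 * commands, stop the timer, flush and destroy the work queue, stop the DPC
 * thread, remove sysfs/FC-transport/SCSI host registrations, free device
 * resources, unmap the register windows and release the PCI regions.
 */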
4c993f76 2347static void
7ee61397 2348qla2x00_remove_one(struct pci_dev *pdev)
1da177e4 2349{
e315cd28
AC
2350 scsi_qla_host_t *base_vha, *vha, *temp;
2351 struct qla_hw_data *ha;
2352
2353 base_vha = pci_get_drvdata(pdev);
2354 ha = base_vha->hw;
2355
2356 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
2357 if (vha && vha->fc_vport)
2358 fc_vport_terminate(vha->fc_vport);
2359 }
1da177e4 2360
e315cd28 2361 set_bit(UNLOADING, &base_vha->dpc_flags);
1da177e4 2362
b9978769
AV
2363 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2364
e315cd28 2365 qla2x00_dfs_remove(base_vha);
c795c1e4 2366
e315cd28 2367 qla84xx_put_chip(base_vha);
c795c1e4 2368
b9978769
AV
2369 /* Disable timer */
2370 if (base_vha->timer_active)
2371 qla2x00_stop_timer(base_vha);
2372
2373 base_vha->flags.online = 0;
2374
68ca949c
AC
2375 /* Flush the work queue and remove it */
2376 if (ha->wq) {
2377 flush_workqueue(ha->wq);
2378 destroy_workqueue(ha->wq);
2379 ha->wq = NULL;
2380 }
2381
b9978769
AV
2382 /* Kill the kernel thread for this host */
2383 if (ha->dpc_thread) {
2384 struct task_struct *t = ha->dpc_thread;
2385
2386 /*
2387 * qla2xxx_wake_dpc checks for ->dpc_thread
2388 * so we need to zero it out.
2389 */
2390 ha->dpc_thread = NULL;
2391 kthread_stop(t);
2392 }
2393
e315cd28 2394 qla2x00_free_sysfs_attr(base_vha);
df613b96 2395
e315cd28 2396 fc_remove_host(base_vha->host);
4d4df193 2397
e315cd28 2398 scsi_remove_host(base_vha->host);
1da177e4 2399
e315cd28 2400 qla2x00_free_device(base_vha);
bdf79621 2401
e315cd28 2402 scsi_host_put(base_vha->host);
1da177e4 2403
a9083016
GM
2404 if (IS_QLA82XX(ha)) {
2405 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2406 if (!ql2xdbwr)
2407 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
2408 } else {
2409 if (ha->iobase)
2410 iounmap(ha->iobase);
1da177e4 2411
a9083016
GM
2412 if (ha->mqiobase)
2413 iounmap(ha->mqiobase);
2414 }
73208dfd 2415
e315cd28
AC
2416 pci_release_selected_regions(ha->pdev, ha->bars);
2417 kfree(ha);
2418 ha = NULL;
1da177e4 2419
90a86fc0
JC
2420 pci_disable_pcie_error_reporting(pdev);
2421
665db93b 2422 pci_disable_device(pdev);
1da177e4
LT
2423 pci_set_drvdata(pdev, NULL);
2424}
1da177e4
LT
2425
2426static void
e315cd28 2427qla2x00_free_device(scsi_qla_host_t *vha)
1da177e4 2428{
e315cd28 2429 struct qla_hw_data *ha = vha->hw;
1da177e4 2430
85880801
AV
2431 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2432
2433 /* Disable timer */
2434 if (vha->timer_active)
2435 qla2x00_stop_timer(vha);
2436
2437 /* Kill the kernel thread for this host */
2438 if (ha->dpc_thread) {
2439 struct task_struct *t = ha->dpc_thread;
2440
2441 /*
2442 * qla2xxx_wake_dpc checks for ->dpc_thread
2443 * so we need to zero it out.
2444 */
2445 ha->dpc_thread = NULL;
2446 kthread_stop(t);
2447 }
2448
2afa19a9
AC
2449 qla25xx_delete_queues(vha);
2450
df613b96 2451 if (ha->flags.fce_enabled)
e315cd28 2452 qla2x00_disable_fce_trace(vha, NULL, NULL);
df613b96 2453
a7a167bf 2454 if (ha->eft)
e315cd28 2455 qla2x00_disable_eft_trace(vha);
a7a167bf 2456
f6ef3b18 2457 /* Stop currently executing firmware. */
e315cd28 2458 qla2x00_try_to_stop_firmware(vha);
1da177e4 2459
85880801
AV
2460 vha->flags.online = 0;
2461
f6ef3b18 2462 /* turn-off interrupts on the card */
a9083016
GM
2463 if (ha->interrupts_on) {
2464 vha->flags.init_done = 0;
fd34f556 2465 ha->isp_ops->disable_intrs(ha);
a9083016 2466 }
f6ef3b18 2467
e315cd28 2468 qla2x00_free_irqs(vha);
1da177e4 2469
e315cd28 2470 qla2x00_mem_free(ha);
73208dfd
AC
2471
2472 qla2x00_free_queues(ha);
1da177e4
LT
2473}
2474
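/*
 * Added note: when "defer" is set the rport is only parked in
 * fcport->drport under the host lock and FCPORT_UPDATE_NEEDED is raised on
 * the base port, so the DPC thread performs the fc_remote_port_delete()
 * later; otherwise the rport is deleted immediately in the caller's context.
 */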
d97994dc 2475static inline void
e315cd28 2476qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
d97994dc 2477 int defer)
2478{
d97994dc 2479 struct fc_rport *rport;
67becc00 2480 scsi_qla_host_t *base_vha;
d97994dc 2481
2482 if (!fcport->rport)
2483 return;
2484
2485 rport = fcport->rport;
2486 if (defer) {
67becc00 2487 base_vha = pci_get_drvdata(vha->hw->pdev);
e315cd28 2488 spin_lock_irq(vha->host->host_lock);
d97994dc 2489 fcport->drport = rport;
e315cd28 2490 spin_unlock_irq(vha->host->host_lock);
67becc00
AV
2491 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2492 qla2xxx_wake_dpc(base_vha);
5f3a9a20 2493 } else
d97994dc 2494 fc_remote_port_delete(rport);
d97994dc 2495}
2496
1da177e4
LT
2497/*
2498 * qla2x00_mark_device_lost Updates fcport state when device goes offline.
2499 *
2500 * Input: ha = adapter block pointer. fcport = port structure pointer.
2501 *
2502 * Return: None.
2503 *
2504 * Context:
2505 */
e315cd28 2506void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
d97994dc 2507 int do_login, int defer)
1da177e4 2508{
2c3dfe3f 2509 if (atomic_read(&fcport->state) == FCS_ONLINE &&
e315cd28
AC
2510 vha->vp_idx == fcport->vp_idx) {
2511 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2512 qla2x00_schedule_rport_del(vha, fcport, defer);
2513 }
fa2a1ce5 2514 /*
1da177e4
LT
2515 * We may need to retry the login, so don't change the state of the
2516 * port but do the retries.
2517 */
2518 if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2519 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2520
2521 if (!do_login)
2522 return;
2523
2524 if (fcport->login_retry == 0) {
e315cd28
AC
2525 fcport->login_retry = vha->hw->login_retry_count;
2526 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1da177e4
LT
2527
2528 DEBUG(printk("scsi(%ld): Port login retry: "
2529 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2530 "id = 0x%04x retry cnt=%d\n",
e315cd28 2531 vha->host_no,
1da177e4
LT
2532 fcport->port_name[0],
2533 fcport->port_name[1],
2534 fcport->port_name[2],
2535 fcport->port_name[3],
2536 fcport->port_name[4],
2537 fcport->port_name[5],
2538 fcport->port_name[6],
2539 fcport->port_name[7],
2540 fcport->loop_id,
2541 fcport->login_retry));
2542 }
2543}
2544
2545/*
2546 * qla2x00_mark_all_devices_lost
2547 * Updates fcport state when device goes offline.
2548 *
2549 * Input:
 2550 * vha = adapter block pointer.
 2551 * defer = non-zero to defer rport removal to the DPC thread.
2552 *
2553 * Return:
2554 * None.
2555 *
2556 * Context:
2557 */
2558void
e315cd28 2559qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1da177e4
LT
2560{
2561 fc_port_t *fcport;
2562
e315cd28 2563 list_for_each_entry(fcport, &vha->vp_fcports, list) {
0d6e61bc 2564 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
1da177e4 2565 continue;
0d6e61bc 2566
1da177e4
LT
2567 /*
2568 * No point in marking the device as lost, if the device is
2569 * already DEAD.
2570 */
2571 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2572 continue;
e315cd28 2573 if (atomic_read(&fcport->state) == FCS_ONLINE) {
0d6e61bc
AV
2574 if (defer)
2575 qla2x00_schedule_rport_del(vha, fcport, defer);
2576 else if (vha->vp_idx == fcport->vp_idx)
2577 qla2x00_schedule_rport_del(vha, fcport, defer);
2578 }
2579 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1da177e4
LT
2580 }
2581}
2582
2583/*
2584* qla2x00_mem_alloc
2585* Allocates adapter memory.
2586*
2587* Returns:
 2588* 1 = success.
e8711085 2589* -ENOMEM = failure.
1da177e4 2590*/
e8711085 2591static int
73208dfd
AC
2592qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2593 struct req_que **req, struct rsp_que **rsp)
1da177e4
LT
2594{
2595 char name[16];
1da177e4 2596
e8711085 2597 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
e315cd28 2598 &ha->init_cb_dma, GFP_KERNEL);
e8711085 2599 if (!ha->init_cb)
e315cd28 2600 goto fail;
e8711085 2601
e315cd28
AC
2602 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2603 &ha->gid_list_dma, GFP_KERNEL);
2604 if (!ha->gid_list)
e8711085 2605 goto fail_free_init_cb;
1da177e4 2606
e8711085
AV
2607 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2608 if (!ha->srb_mempool)
e315cd28 2609 goto fail_free_gid_list;
e8711085 2610
a9083016
GM
2611 if (IS_QLA82XX(ha)) {
2612 /* Allocate cache for CT6 Ctx. */
2613 if (!ctx_cachep) {
2614 ctx_cachep = kmem_cache_create("qla2xxx_ctx",
2615 sizeof(struct ct6_dsd), 0,
2616 SLAB_HWCACHE_ALIGN, NULL);
2617 if (!ctx_cachep)
2618 goto fail_free_gid_list;
2619 }
2620 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2621 ctx_cachep);
2622 if (!ha->ctx_mempool)
2623 goto fail_free_srb_mempool;
2624 }
2625
e8711085
AV
2626 /* Get memory for cached NVRAM */
2627 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2628 if (!ha->nvram)
a9083016 2629 goto fail_free_ctx_mempool;
e8711085 2630
e315cd28
AC
2631 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2632 ha->pdev->device);
2633 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2634 DMA_POOL_SIZE, 8, 0);
2635 if (!ha->s_dma_pool)
2636 goto fail_free_nvram;
2637
bad75002 2638 if (IS_QLA82XX(ha) || ql2xenabledif) {
a9083016
GM
2639 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2640 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2641 if (!ha->dl_dma_pool) {
2642 qla_printk(KERN_WARNING, ha,
2643 "Memory Allocation failed - dl_dma_pool\n");
2644 goto fail_s_dma_pool;
2645 }
2646
2647 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2648 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2649 if (!ha->fcp_cmnd_dma_pool) {
2650 qla_printk(KERN_WARNING, ha,
2651 "Memory Allocation failed - fcp_cmnd_dma_pool\n");
2652 goto fail_dl_dma_pool;
2653 }
2654 }
2655
e8711085
AV
2656 /* Allocate memory for SNS commands */
2657 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
e315cd28 2658 /* Get consistent memory allocated for SNS commands */
e8711085 2659 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
e315cd28 2660 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
e8711085 2661 if (!ha->sns_cmd)
e315cd28 2662 goto fail_dma_pool;
e8711085 2663 } else {
e315cd28 2664 /* Get consistent memory allocated for MS IOCB */
e8711085 2665 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
e315cd28 2666 &ha->ms_iocb_dma);
e8711085 2667 if (!ha->ms_iocb)
e315cd28
AC
2668 goto fail_dma_pool;
2669 /* Get consistent memory allocated for CT SNS commands */
e8711085 2670 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
e315cd28 2671 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
e8711085
AV
2672 if (!ha->ct_sns)
2673 goto fail_free_ms_iocb;
1da177e4
LT
2674 }
2675
e315cd28 2676 /* Allocate memory for request ring */
73208dfd
AC
2677 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2678 if (!*req) {
e315cd28
AC
2679 DEBUG(printk("Unable to allocate memory for req\n"));
2680 goto fail_req;
2681 }
73208dfd
AC
2682 (*req)->length = req_len;
2683 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2684 ((*req)->length + 1) * sizeof(request_t),
2685 &(*req)->dma, GFP_KERNEL);
2686 if (!(*req)->ring) {
e315cd28
AC
2687 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2688 goto fail_req_ring;
2689 }
2690 /* Allocate memory for response ring */
73208dfd
AC
2691 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2692 if (!*rsp) {
2693 qla_printk(KERN_WARNING, ha,
2694 "Unable to allocate memory for rsp\n");
e315cd28
AC
2695 goto fail_rsp;
2696 }
73208dfd
AC
2697 (*rsp)->hw = ha;
2698 (*rsp)->length = rsp_len;
2699 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2700 ((*rsp)->length + 1) * sizeof(response_t),
2701 &(*rsp)->dma, GFP_KERNEL);
2702 if (!(*rsp)->ring) {
2703 qla_printk(KERN_WARNING, ha,
2704 "Unable to allocate memory for rsp_ring\n");
e315cd28
AC
2705 goto fail_rsp_ring;
2706 }
73208dfd
AC
2707 (*req)->rsp = *rsp;
2708 (*rsp)->req = *req;
2709 /* Allocate memory for NVRAM data for vports */
2710 if (ha->nvram_npiv_size) {
2711 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2712 ha->nvram_npiv_size, GFP_KERNEL);
2713 if (!ha->npiv_info) {
2714 qla_printk(KERN_WARNING, ha,
2715 "Unable to allocate memory for npiv info\n");
2716 goto fail_npiv_info;
2717 }
2718 } else
2719 ha->npiv_info = NULL;
e8711085 2720
b64b0e8f 2721 /* Get consistent memory allocated for EX-INIT-CB. */
a9083016 2722 if (IS_QLA8XXX_TYPE(ha)) {
b64b0e8f
AV
2723 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2724 &ha->ex_init_cb_dma);
2725 if (!ha->ex_init_cb)
2726 goto fail_ex_init_cb;
2727 }
2728
a9083016
GM
2729 INIT_LIST_HEAD(&ha->gbl_dsd_list);
2730
5ff1d584
AV
2731 /* Get consistent memory allocated for Async Port-Database. */
2732 if (!IS_FWI2_CAPABLE(ha)) {
2733 ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2734 &ha->async_pd_dma);
2735 if (!ha->async_pd)
2736 goto fail_async_pd;
2737 }
2738
e315cd28
AC
2739 INIT_LIST_HEAD(&ha->vp_list);
2740 return 1;
2741
5ff1d584
AV
2742fail_async_pd:
2743 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
b64b0e8f
AV
2744fail_ex_init_cb:
2745 kfree(ha->npiv_info);
73208dfd
AC
2746fail_npiv_info:
2747 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2748 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2749 (*rsp)->ring = NULL;
2750 (*rsp)->dma = 0;
e315cd28 2751fail_rsp_ring:
73208dfd 2752 kfree(*rsp);
e315cd28 2753fail_rsp:
73208dfd
AC
2754 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2755 sizeof(request_t), (*req)->ring, (*req)->dma);
2756 (*req)->ring = NULL;
2757 (*req)->dma = 0;
e315cd28 2758fail_req_ring:
73208dfd 2759 kfree(*req);
e315cd28
AC
2760fail_req:
2761 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2762 ha->ct_sns, ha->ct_sns_dma);
2763 ha->ct_sns = NULL;
2764 ha->ct_sns_dma = 0;
e8711085
AV
2765fail_free_ms_iocb:
2766 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2767 ha->ms_iocb = NULL;
2768 ha->ms_iocb_dma = 0;
e315cd28 2769fail_dma_pool:
bad75002 2770 if (IS_QLA82XX(ha) || ql2xenabledif) {
a9083016
GM
2771 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2772 ha->fcp_cmnd_dma_pool = NULL;
2773 }
2774fail_dl_dma_pool:
bad75002 2775 if (IS_QLA82XX(ha) || ql2xenabledif) {
a9083016
GM
2776 dma_pool_destroy(ha->dl_dma_pool);
2777 ha->dl_dma_pool = NULL;
2778 }
2779fail_s_dma_pool:
e315cd28
AC
2780 dma_pool_destroy(ha->s_dma_pool);
2781 ha->s_dma_pool = NULL;
e8711085
AV
2782fail_free_nvram:
2783 kfree(ha->nvram);
2784 ha->nvram = NULL;
a9083016
GM
2785fail_free_ctx_mempool:
2786 mempool_destroy(ha->ctx_mempool);
2787 ha->ctx_mempool = NULL;
e8711085
AV
2788fail_free_srb_mempool:
2789 mempool_destroy(ha->srb_mempool);
2790 ha->srb_mempool = NULL;
e8711085
AV
2791fail_free_gid_list:
2792 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
e315cd28 2793 ha->gid_list_dma);
e8711085
AV
2794 ha->gid_list = NULL;
2795 ha->gid_list_dma = 0;
e315cd28
AC
2796fail_free_init_cb:
2797 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2798 ha->init_cb_dma);
2799 ha->init_cb = NULL;
2800 ha->init_cb_dma = 0;
e8711085 2801fail:
e315cd28 2802 DEBUG(printk("%s: Memory allocation failure\n", __func__));
e8711085 2803 return -ENOMEM;
1da177e4
LT
2804}
2805
2806/*
2807* qla2x00_mem_free
2808* Frees all adapter allocated memory.
2809*
2810* Input:
2811* ha = adapter block pointer.
2812*/
a824ebb3 2813static void
e315cd28 2814qla2x00_mem_free(struct qla_hw_data *ha)
1da177e4 2815{
e8711085
AV
2816 if (ha->srb_mempool)
2817 mempool_destroy(ha->srb_mempool);
1da177e4 2818
df613b96
AV
2819 if (ha->fce)
2820 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
e315cd28 2821 ha->fce_dma);
df613b96 2822
a7a167bf
AV
2823 if (ha->fw_dump) {
2824 if (ha->eft)
2825 dma_free_coherent(&ha->pdev->dev,
e315cd28 2826 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
a7a167bf
AV
2827 vfree(ha->fw_dump);
2828 }
2829
11bbc1d8
AV
2830 if (ha->dcbx_tlv)
2831 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2832 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2833
ce0423f4
AV
2834 if (ha->xgmac_data)
2835 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2836 ha->xgmac_data, ha->xgmac_data_dma);
2837
1da177e4
LT
2838 if (ha->sns_cmd)
2839 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
e315cd28 2840 ha->sns_cmd, ha->sns_cmd_dma);
1da177e4
LT
2841
2842 if (ha->ct_sns)
2843 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
e315cd28 2844 ha->ct_sns, ha->ct_sns_dma);
1da177e4 2845
88729e53
AV
2846 if (ha->sfp_data)
2847 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2848
ad0ecd61
JC
2849 if (ha->edc_data)
2850 dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2851
1da177e4
LT
2852 if (ha->ms_iocb)
2853 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2854
b64b0e8f 2855 if (ha->ex_init_cb)
a9083016
GM
2856 dma_pool_free(ha->s_dma_pool,
2857 ha->ex_init_cb, ha->ex_init_cb_dma);
b64b0e8f 2858
5ff1d584
AV
2859 if (ha->async_pd)
2860 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2861
1da177e4
LT
2862 if (ha->s_dma_pool)
2863 dma_pool_destroy(ha->s_dma_pool);
2864
1da177e4
LT
2865 if (ha->gid_list)
2866 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
e315cd28 2867 ha->gid_list_dma);
1da177e4 2868
a9083016
GM
2869 if (IS_QLA82XX(ha)) {
2870 if (!list_empty(&ha->gbl_dsd_list)) {
2871 struct dsd_dma *dsd_ptr, *tdsd_ptr;
2872
2873 /* clean up allocated prev pool */
2874 list_for_each_entry_safe(dsd_ptr,
2875 tdsd_ptr, &ha->gbl_dsd_list, list) {
2876 dma_pool_free(ha->dl_dma_pool,
2877 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
2878 list_del(&dsd_ptr->list);
2879 kfree(dsd_ptr);
2880 }
2881 }
2882 }
2883
2884 if (ha->dl_dma_pool)
2885 dma_pool_destroy(ha->dl_dma_pool);
2886
2887 if (ha->fcp_cmnd_dma_pool)
2888 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2889
2890 if (ha->ctx_mempool)
2891 mempool_destroy(ha->ctx_mempool);
2892
e315cd28
AC
2893 if (ha->init_cb)
2894 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
a9083016 2895 ha->init_cb, ha->init_cb_dma);
e315cd28
AC
2896 vfree(ha->optrom_buffer);
2897 kfree(ha->nvram);
73208dfd 2898 kfree(ha->npiv_info);
1da177e4 2899
e8711085 2900 ha->srb_mempool = NULL;
a9083016 2901 ha->ctx_mempool = NULL;
a7a167bf
AV
2902 ha->eft = NULL;
2903 ha->eft_dma = 0;
1da177e4
LT
2904 ha->sns_cmd = NULL;
2905 ha->sns_cmd_dma = 0;
2906 ha->ct_sns = NULL;
2907 ha->ct_sns_dma = 0;
2908 ha->ms_iocb = NULL;
2909 ha->ms_iocb_dma = 0;
1da177e4
LT
2910 ha->init_cb = NULL;
2911 ha->init_cb_dma = 0;
b64b0e8f
AV
2912 ha->ex_init_cb = NULL;
2913 ha->ex_init_cb_dma = 0;
5ff1d584
AV
2914 ha->async_pd = NULL;
2915 ha->async_pd_dma = 0;
1da177e4
LT
2916
2917 ha->s_dma_pool = NULL;
a9083016
GM
2918 ha->dl_dma_pool = NULL;
2919 ha->fcp_cmnd_dma_pool = NULL;
1da177e4 2920
1da177e4
LT
2921 ha->gid_list = NULL;
2922 ha->gid_list_dma = 0;
2923
e315cd28
AC
2924 ha->fw_dump = NULL;
2925 ha->fw_dumped = 0;
2926 ha->fw_dump_reading = 0;
e315cd28 2927}
1da177e4 2928
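/*
 * Added note: qla2x00_create_host() wraps scsi_host_alloc(), using the
 * scsi_qla_host_t as host-private data, links it to the shared qla_hw_data
 * and initializes the per-host fcport and work lists; the probe path above
 * uses it to create the base port.
 */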
e315cd28
AC
2929struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2930 struct qla_hw_data *ha)
2931{
2932 struct Scsi_Host *host;
2933 struct scsi_qla_host *vha = NULL;
854165f4 2934
e315cd28
AC
2935 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2936 if (host == NULL) {
2937 printk(KERN_WARNING
2938 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2939 goto fail;
2940 }
2941
2942 /* Clear our data area */
2943 vha = shost_priv(host);
2944 memset(vha, 0, sizeof(scsi_qla_host_t));
2945
2946 vha->host = host;
2947 vha->host_no = host->host_no;
2948 vha->hw = ha;
2949
2950 INIT_LIST_HEAD(&vha->vp_fcports);
2951 INIT_LIST_HEAD(&vha->work_list);
2952 INIT_LIST_HEAD(&vha->list);
2953
f999f4c1
AV
2954 spin_lock_init(&vha->work_lock);
2955
e315cd28
AC
2956 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2957 return vha;
2958
2959fail:
2960 return vha;
1da177e4
LT
2961}
2962
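/*
 * Added note on the work-event mechanism below: qla2x00_alloc_work()
 * builds a qla_work_evt with GFP_ATOMIC and marks it QLA_EVT_FLAG_FREE;
 * qla2x00_post_work() appends it to vha->work_list under work_lock and wakes
 * the DPC thread, which drains and dispatches the list in qla2x00_do_work()
 * and frees entries flagged QLA_EVT_FLAG_FREE.
 */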
01ef66bb 2963static struct qla_work_evt *
f999f4c1 2964qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
0971de7f
AV
2965{
2966 struct qla_work_evt *e;
2967
f999f4c1 2968 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
0971de7f
AV
2969 if (!e)
2970 return NULL;
2971
2972 INIT_LIST_HEAD(&e->list);
2973 e->type = type;
2974 e->flags = QLA_EVT_FLAG_FREE;
2975 return e;
2976}
2977
01ef66bb 2978static int
f999f4c1 2979qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
0971de7f 2980{
f999f4c1 2981 unsigned long flags;
0971de7f 2982
f999f4c1 2983 spin_lock_irqsave(&vha->work_lock, flags);
e315cd28 2984 list_add_tail(&e->list, &vha->work_list);
f999f4c1 2985 spin_unlock_irqrestore(&vha->work_lock, flags);
e315cd28 2986 qla2xxx_wake_dpc(vha);
f999f4c1 2987
0971de7f
AV
2988 return QLA_SUCCESS;
2989}
2990
2991int
e315cd28 2992qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
0971de7f
AV
2993 u32 data)
2994{
2995 struct qla_work_evt *e;
2996
f999f4c1 2997 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
0971de7f
AV
2998 if (!e)
2999 return QLA_FUNCTION_FAILED;
3000
3001 e->u.aen.code = code;
3002 e->u.aen.data = data;
f999f4c1 3003 return qla2x00_post_work(vha, e);
0971de7f
AV
3004}
3005
8a659571
AV
3006int
3007qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
3008{
3009 struct qla_work_evt *e;
3010
f999f4c1 3011 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
8a659571
AV
3012 if (!e)
3013 return QLA_FUNCTION_FAILED;
3014
3015 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
f999f4c1 3016 return qla2x00_post_work(vha, e);
8a659571
AV
3017}
3018
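/*
 * Added example (mechanical expansion of the macro below, shown for
 * readability): the first invocation,
 * qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN), generates roughly
 *
 *	int qla2x00_post_async_login_work(struct scsi_qla_host *vha,
 *	    fc_port_t *fcport, uint16_t *data)
 *	{
 *		struct qla_work_evt *e;
 *
 *		e = qla2x00_alloc_work(vha, QLA_EVT_ASYNC_LOGIN);
 *		if (!e)
 *			return QLA_FUNCTION_FAILED;
 *		e->u.logio.fcport = fcport;
 *		if (data) {
 *			e->u.logio.data[0] = data[0];
 *			e->u.logio.data[1] = data[1];
 *		}
 *		return qla2x00_post_work(vha, e);
 *	}
 */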
ac280b67
AV
3019#define qla2x00_post_async_work(name, type) \
3020int qla2x00_post_async_##name##_work( \
3021 struct scsi_qla_host *vha, \
3022 fc_port_t *fcport, uint16_t *data) \
3023{ \
3024 struct qla_work_evt *e; \
3025 \
3026 e = qla2x00_alloc_work(vha, type); \
3027 if (!e) \
3028 return QLA_FUNCTION_FAILED; \
3029 \
3030 e->u.logio.fcport = fcport; \
3031 if (data) { \
3032 e->u.logio.data[0] = data[0]; \
3033 e->u.logio.data[1] = data[1]; \
3034 } \
3035 return qla2x00_post_work(vha, e); \
3036}
3037
3038qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
3039qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
3040qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
3041qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
5ff1d584
AV
3042qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
3043qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
ac280b67 3044
3420d36c
AV
3045int
3046qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
3047{
3048 struct qla_work_evt *e;
3049
3050 e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
3051 if (!e)
3052 return QLA_FUNCTION_FAILED;
3053
3054 e->u.uevent.code = code;
3055 return qla2x00_post_work(vha, e);
3056}
3057
3058static void
3059qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
3060{
3061 char event_string[40];
3062 char *envp[] = { event_string, NULL };
3063
3064 switch (code) {
3065 case QLA_UEVENT_CODE_FW_DUMP:
3066 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
3067 vha->host_no);
3068 break;
3069 default:
3070 /* do nothing */
3071 break;
3072 }
3073 kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
3074}
3075
ac280b67 3076void
e315cd28 3077qla2x00_do_work(struct scsi_qla_host *vha)
0971de7f 3078{
f999f4c1
AV
3079 struct qla_work_evt *e, *tmp;
3080 unsigned long flags;
3081 LIST_HEAD(work);
0971de7f 3082
f999f4c1
AV
3083 spin_lock_irqsave(&vha->work_lock, flags);
3084 list_splice_init(&vha->work_list, &work);
3085 spin_unlock_irqrestore(&vha->work_lock, flags);
3086
3087 list_for_each_entry_safe(e, tmp, &work, list) {
0971de7f 3088 list_del_init(&e->list);
0971de7f
AV
3089
3090 switch (e->type) {
3091 case QLA_EVT_AEN:
e315cd28 3092 fc_host_post_event(vha->host, fc_get_event_number(),
0971de7f
AV
3093 e->u.aen.code, e->u.aen.data);
3094 break;
8a659571
AV
3095 case QLA_EVT_IDC_ACK:
3096 qla81xx_idc_ack(vha, e->u.idc_ack.mb);
3097 break;
ac280b67
AV
3098 case QLA_EVT_ASYNC_LOGIN:
3099 qla2x00_async_login(vha, e->u.logio.fcport,
3100 e->u.logio.data);
3101 break;
3102 case QLA_EVT_ASYNC_LOGIN_DONE:
3103 qla2x00_async_login_done(vha, e->u.logio.fcport,
3104 e->u.logio.data);
3105 break;
3106 case QLA_EVT_ASYNC_LOGOUT:
3107 qla2x00_async_logout(vha, e->u.logio.fcport);
3108 break;
3109 case QLA_EVT_ASYNC_LOGOUT_DONE:
3110 qla2x00_async_logout_done(vha, e->u.logio.fcport,
3111 e->u.logio.data);
3112 break;
5ff1d584
AV
3113 case QLA_EVT_ASYNC_ADISC:
3114 qla2x00_async_adisc(vha, e->u.logio.fcport,
3115 e->u.logio.data);
3116 break;
3117 case QLA_EVT_ASYNC_ADISC_DONE:
3118 qla2x00_async_adisc_done(vha, e->u.logio.fcport,
3119 e->u.logio.data);
3120 break;
3420d36c
AV
3121 case QLA_EVT_UEVENT:
3122 qla2x00_uevent_emit(vha, e->u.uevent.code);
3123 break;
0971de7f
AV
3124 }
3125 if (e->flags & QLA_EVT_FLAG_FREE)
3126 kfree(e);
e315cd28 3127 }
e315cd28 3128}
f999f4c1 3129
e315cd28
AC
 3130/* Re-login all the fcports of a vport.
3131 * Context: dpc thread
3132 */
3133void qla2x00_relogin(struct scsi_qla_host *vha)
3134{
3135 fc_port_t *fcport;
c6b2fca8 3136 int status;
e315cd28
AC
3137 uint16_t next_loopid = 0;
3138 struct qla_hw_data *ha = vha->hw;
ac280b67 3139 uint16_t data[2];
e315cd28
AC
3140
3141 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3142 /*
3143 * If the port is not ONLINE then try to login
3144 * to it if we haven't run out of retries.
3145 */
5ff1d584
AV
3146 if (atomic_read(&fcport->state) != FCS_ONLINE &&
3147 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
ac280b67 3148 fcport->login_retry--;
e315cd28 3149 if (fcport->flags & FCF_FABRIC_DEVICE) {
f08b7251 3150 if (fcport->flags & FCF_FCP2_DEVICE)
e315cd28
AC
3151 ha->isp_ops->fabric_logout(vha,
3152 fcport->loop_id,
3153 fcport->d_id.b.domain,
3154 fcport->d_id.b.area,
3155 fcport->d_id.b.al_pa);
3156
ac280b67 3157 if (IS_ALOGIO_CAPABLE(ha)) {
5ff1d584 3158 fcport->flags |= FCF_ASYNC_SENT;
ac280b67
AV
3159 data[0] = 0;
3160 data[1] = QLA_LOGIO_LOGIN_RETRIED;
3161 status = qla2x00_post_async_login_work(
3162 vha, fcport, data);
3163 if (status == QLA_SUCCESS)
3164 continue;
3165 /* Attempt a retry. */
3166 status = 1;
3167 } else
3168 status = qla2x00_fabric_login(vha,
3169 fcport, &next_loopid);
e315cd28
AC
3170 } else
3171 status = qla2x00_local_device_login(vha,
3172 fcport);
3173
e315cd28
AC
3174 if (status == QLA_SUCCESS) {
3175 fcport->old_loop_id = fcport->loop_id;
3176
3177 DEBUG(printk("scsi(%ld): port login OK: logged "
3178 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
3179
3180 qla2x00_update_fcport(vha, fcport);
3181
3182 } else if (status == 1) {
3183 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3184 /* retry the login again */
3185 DEBUG(printk("scsi(%ld): Retrying"
3186 " %d login again loop_id 0x%x\n",
3187 vha->host_no, fcport->login_retry,
3188 fcport->loop_id));
3189 } else {
3190 fcport->login_retry = 0;
3191 }
3192
3193 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3194 fcport->loop_id = FC_NO_LOOP_ID;
3195 }
3196 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3197 break;
0971de7f 3198 }
0971de7f
AV
3199}
3200
1da177e4
LT
3201/**************************************************************************
3202* qla2x00_do_dpc
 3203* This kernel thread is a task that is scheduled by the interrupt handler
 3204* to perform the background processing for interrupts.
 3205*
 3206* Notes:
 3207* This task always runs in the context of a kernel thread. It
 3208* is kicked off by the driver's detect code and starts up
 3209* one per adapter. It immediately goes to sleep and waits for
 3210* some fibre event. When either the interrupt handler or
 3211* the timer routine detects an event, it will set one of the task
 3212* bits and then wake us up.
3213**************************************************************************/
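/*
 * Added note: the loop below is driven by base_vha->dpc_flags.  Bits handled
 * here include ISP_UNRECOVERABLE and FCOE_CTX_RESET_NEEDED (ISP82xx only),
 * ISP_ABORT_NEEDED, FCPORT_UPDATE_NEEDED, RESET_MARKER_NEEDED,
 * RELOGIN_NEEDED, LOOP_RESYNC_NEEDED, NPIV_CONFIG_NEEDED and
 * BEACON_BLINK_NEEDED; the thread also re-enables interrupts if needed and
 * runs the per-vport DPC work via qla2x00_do_dpc_all_vps().
 */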
3214static int
3215qla2x00_do_dpc(void *data)
3216{
2c3dfe3f 3217 int rval;
e315cd28
AC
3218 scsi_qla_host_t *base_vha;
3219 struct qla_hw_data *ha;
1da177e4 3220
e315cd28
AC
3221 ha = (struct qla_hw_data *)data;
3222 base_vha = pci_get_drvdata(ha->pdev);
1da177e4 3223
1da177e4
LT
3224 set_user_nice(current, -20);
3225
39a11240 3226 while (!kthread_should_stop()) {
1da177e4
LT
3227 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
3228
39a11240
CH
3229 set_current_state(TASK_INTERRUPTIBLE);
3230 schedule();
3231 __set_current_state(TASK_RUNNING);
1da177e4
LT
3232
3233 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
3234
3235 /* Initialization not yet finished. Don't do anything yet. */
e315cd28 3236 if (!base_vha->flags.init_done)
1da177e4
LT
3237 continue;
3238
85880801
AV
3239 if (ha->flags.eeh_busy) {
3240 DEBUG17(qla_printk(KERN_WARNING, ha,
3241 "qla2x00_do_dpc: dpc_flags: %lx\n",
3242 base_vha->dpc_flags));
3243 continue;
3244 }
3245
e315cd28 3246 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
1da177e4
LT
3247
3248 ha->dpc_active = 1;
3249
1da177e4 3250 if (ha->flags.mbox_busy) {
1da177e4
LT
3251 ha->dpc_active = 0;
3252 continue;
3253 }
3254
e315cd28 3255 qla2x00_do_work(base_vha);
0971de7f 3256
a9083016
GM
3257 if (IS_QLA82XX(ha)) {
3258 if (test_and_clear_bit(ISP_UNRECOVERABLE,
3259 &base_vha->dpc_flags)) {
3260 qla82xx_idc_lock(ha);
3261 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3262 QLA82XX_DEV_FAILED);
3263 qla82xx_idc_unlock(ha);
3264 qla_printk(KERN_INFO, ha,
3265 "HW State: FAILED\n");
3266 qla82xx_device_state_handler(base_vha);
3267 continue;
3268 }
3269
3270 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3271 &base_vha->dpc_flags)) {
3272
3273 DEBUG(printk(KERN_INFO
3274 "scsi(%ld): dpc: sched "
3275 "qla82xx_fcoe_ctx_reset ha = %p\n",
3276 base_vha->host_no, ha));
3277 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3278 &base_vha->dpc_flags))) {
3279 if (qla82xx_fcoe_ctx_reset(base_vha)) {
3280 /* FCoE-ctx reset failed.
3281 * Escalate to chip-reset
3282 */
3283 set_bit(ISP_ABORT_NEEDED,
3284 &base_vha->dpc_flags);
3285 }
3286 clear_bit(ABORT_ISP_ACTIVE,
3287 &base_vha->dpc_flags);
3288 }
3289
3290 DEBUG(printk("scsi(%ld): dpc:"
3291 " qla82xx_fcoe_ctx_reset end\n",
3292 base_vha->host_no));
3293 }
3294 }
3295
e315cd28
AC
3296 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3297 &base_vha->dpc_flags)) {
1da177e4
LT
3298
3299 DEBUG(printk("scsi(%ld): dpc: sched "
3300 "qla2x00_abort_isp ha = %p\n",
e315cd28 3301 base_vha->host_no, ha));
1da177e4 3302 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
e315cd28 3303 &base_vha->dpc_flags))) {
1da177e4 3304
a9083016 3305 if (ha->isp_ops->abort_isp(base_vha)) {
1da177e4
LT
3306 /* failed. retry later */
3307 set_bit(ISP_ABORT_NEEDED,
e315cd28 3308 &base_vha->dpc_flags);
99363ef8 3309 }
e315cd28
AC
3310 clear_bit(ABORT_ISP_ACTIVE,
3311 &base_vha->dpc_flags);
99363ef8
SJ
3312 }
3313
1da177e4 3314 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
e315cd28 3315 base_vha->host_no));
1da177e4
LT
3316 }
3317
e315cd28
AC
3318 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
3319 qla2x00_update_fcports(base_vha);
3320 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
c9c5ced9 3321 }
d97994dc 3322
e315cd28
AC
3323 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3324 &base_vha->dpc_flags) &&
3325 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
1da177e4
LT
3326
3327 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
e315cd28 3328 base_vha->host_no));
1da177e4 3329
e315cd28
AC
3330 qla2x00_rst_aen(base_vha);
3331 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
1da177e4
LT
3332 }
3333
3334 /* Retry each device up to login retry count */
e315cd28
AC
3335 if ((test_and_clear_bit(RELOGIN_NEEDED,
3336 &base_vha->dpc_flags)) &&
3337 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3338 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
1da177e4
LT
3339
3340 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
e315cd28
AC
3341 base_vha->host_no));
3342 qla2x00_relogin(base_vha);
3343
1da177e4 3344 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
e315cd28 3345 base_vha->host_no));
1da177e4
LT
3346 }
3347
e315cd28
AC
3348 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3349 &base_vha->dpc_flags)) {
1da177e4
LT
3350
3351 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
e315cd28 3352 base_vha->host_no));
1da177e4
LT
3353
3354 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
e315cd28 3355 &base_vha->dpc_flags))) {
1da177e4 3356
e315cd28 3357 rval = qla2x00_loop_resync(base_vha);
1da177e4 3358
e315cd28
AC
3359 clear_bit(LOOP_RESYNC_ACTIVE,
3360 &base_vha->dpc_flags);
1da177e4
LT
3361 }
3362
3363 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
e315cd28 3364 base_vha->host_no));
1da177e4
LT
3365 }
3366
		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
			qla2xxx_flash_npiv_conf(base_vha);
		}

		if (!ha->interrupts_on)
			ha->isp_ops->enable_intrs(ha);

		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
					&base_vha->dpc_flags))
			ha->isp_ops->beacon_blink(base_vha);

		qla2x00_do_dpc_all_vps(base_vha);

		ha->dpc_active = 0;
	} /* End of while(1) */

	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

	/* Cleanup any residual CTX SRBs. */
	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	return 0;
}

void
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct task_struct *t = ha->dpc_thread;

	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
		wake_up_process(t);
}
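
/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * Work is handed to the DPC thread by setting a flag bit in
 * vha->dpc_flags and then calling qla2xxx_wake_dpc(), exactly as
 * qla2x00_timer() does below; the helper name here is hypothetical.
 */
#if 0	/* example only */
static void example_request_isp_abort(scsi_qla_host_t *vha)
{
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}
#endif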

/*
* qla2x00_rst_aen
*      Processes asynchronous reset.
*
* Input:
*      vha = adapter block pointer.
*/
static void
qla2x00_rst_aen(scsi_qla_host_t *vha)
{
	if (vha->flags.online && !vha->flags.reset_active &&
	    !atomic_read(&vha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			/*
			 * Issue marker command only when we are going to start
			 * the I/O.
			 */
			vha->marker_needed = 1;
		} while (!atomic_read(&vha->loop_down_timer) &&
		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
	}
}

static void
qla2x00_sp_free_dma(srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	if (sp->flags & SRB_DMA_VALID) {
		scsi_dma_unmap(cmd);
		sp->flags &= ~SRB_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
		/* List is assured to have elements */
		qla2x00_clean_dsd_pool(ha, sp);
		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
	}

	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
		dma_pool_free(ha->dl_dma_pool, sp->ctx,
		    ((struct crc_context *)sp->ctx)->crc_ctx_dma);
		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
	}

	CMD_SP(cmd) = NULL;
}

void
qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
{
	struct scsi_cmnd *cmd = sp->cmd;

	qla2x00_sp_free_dma(sp);

	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
		struct ct6_dsd *ctx = sp->ctx;
		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
		    ctx->fcp_cmnd_dma);
		list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
		ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
		ha->gbl_dsd_avail += ctx->dsd_use_cnt;
		mempool_free(sp->ctx, ha->ctx_mempool);
		sp->ctx = NULL;
	}

	mempool_free(sp, ha->srb_mempool);
	cmd->scsi_done(cmd);
}

/**************************************************************************
* qla2x00_timer
*
* Description:
*   One second timer
*
* Context: Interrupt
***************************************************************************/
void
qla2x00_timer(scsi_qla_host_t *vha)
{
	unsigned long cpu_flags = 0;
	fc_port_t *fcport;
	int start_dpc = 0;
	int index;
	srb_t *sp;
	int t;
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;

	if (IS_QLA82XX(ha))
		qla82xx_watchdog(vha);

	/* Hardware read to raise pending EEH errors during mailbox waits. */
	if (!pci_channel_offline(ha->pdev))
		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
	/*
	 * Ports - Port down timer.
	 *
	 * Whenever a port is in the LOST state we start decrementing its port
	 * down timer every second until it reaches zero. Once it reaches zero
	 * the port is marked DEAD.
	 */
	t = 0;
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {

			if (atomic_read(&fcport->port_down_timer) == 0)
				continue;

			if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
				atomic_set(&fcport->state, FCS_DEVICE_DEAD);

			DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
			    "%d remaining\n",
			    vha->host_no,
			    t, atomic_read(&fcport->port_down_timer)));
		}
		t++;
	} /* End of for fcport */

	/* Loop down handler. */
	if (atomic_read(&vha->loop_down_timer) > 0 &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
	    && vha->flags.online) {

		if (atomic_read(&vha->loop_down_timer) ==
		    vha->loop_down_abort_time) {

			DEBUG(printk("scsi(%ld): Loop Down - aborting the "
			    "queues before time expire\n",
			    vha->host_no));

			if (!IS_QLA2100(ha) && vha->link_down_timeout)
				atomic_set(&vha->loop_state, LOOP_DEAD);

			/*
			 * Schedule an ISP abort to return any FCP2-device
			 * commands.
			 */
			/* NPIV - scan physical port only */
			if (!vha->vp_idx) {
				spin_lock_irqsave(&ha->hardware_lock,
				    cpu_flags);
				req = ha->req_q_map[0];
				for (index = 1;
				    index < MAX_OUTSTANDING_COMMANDS;
				    index++) {
					fc_port_t *sfcp;

					sp = req->outstanding_cmds[index];
					if (!sp)
						continue;
					if (sp->ctx && !IS_PROT_IO(sp))
						continue;
					sfcp = sp->fcport;
					if (!(sfcp->flags & FCF_FCP2_DEVICE))
						continue;

					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
					break;
				}
				spin_unlock_irqrestore(&ha->hardware_lock,
				    cpu_flags);
			}
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
			if (!(vha->device_flags & DFLG_NO_CABLE)) {
				DEBUG(printk("scsi(%ld): Loop down - "
				    "aborting ISP.\n",
				    vha->host_no));
				qla_printk(KERN_WARNING, ha,
				    "Loop down - aborting ISP.\n");

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		}
		DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
		    vha->host_no,
		    atomic_read(&vha->loop_down_timer)));
	}

	/* Check if beacon LED needs to be blinked */
	if (ha->beacon_blink_led == 1) {
		set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
		start_dpc++;
	}

	/* Process any deferred work. */
	if (!list_empty(&vha->work_list))
		start_dpc++;

	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
	    start_dpc ||
	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
		qla2xxx_wake_dpc(vha);

	qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
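
/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * qla2x00_timer() is re-armed once a second via
 * qla2x00_restart_timer(vha, WATCH_INTERVAL) above; the driver's real
 * timer setup lives elsewhere in this file. Below is a generic way to
 * arm such a one-second watchdog with the 2.6-era timer API. The
 * "example_" helper and the assumption that vha->timer exists are
 * hypothetical.
 */
#if 0	/* example only */
static void example_arm_watchdog(scsi_qla_host_t *vha)
{
	init_timer(&vha->timer);
	vha->timer.data = (unsigned long)vha;
	vha->timer.function = (void (*)(unsigned long))qla2x00_timer;
	vha->timer.expires = jiffies + WATCH_INTERVAL * HZ;
	add_timer(&vha->timer);
}
#endif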

/* Firmware interface routines. */

#define FW_BLOBS	8
#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5
#define FW_ISP81XX	6
#define FW_ISP82XX	7

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"
#define FW_FILE_ISP81XX	"ql8100_fw.bin"
#define FW_FILE_ISP82XX	"ql8200_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
	{ .name = FW_FILE_ISP81XX, },
	{ .name = FW_FILE_ISP82XX, },
};

struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct fw_blob *blob;

	blob = NULL;
	if (IS_QLA2100(ha)) {
		blob = &qla_fw_blobs[FW_ISP21XX];
	} else if (IS_QLA2200(ha)) {
		blob = &qla_fw_blobs[FW_ISP22XX];
	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
		blob = &qla_fw_blobs[FW_ISP2300];
	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
		blob = &qla_fw_blobs[FW_ISP2322];
	} else if (IS_QLA24XX_TYPE(ha)) {
		blob = &qla_fw_blobs[FW_ISP24XX];
	} else if (IS_QLA25XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP25XX];
	} else if (IS_QLA81XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP81XX];
	} else if (IS_QLA82XX(ha)) {
		blob = &qla_fw_blobs[FW_ISP82XX];
	}

	mutex_lock(&qla_fw_lock);
	if (blob->fw)
		goto out;

	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
		DEBUG2(printk("scsi(%ld): Failed to load firmware image "
		    "(%s).\n", vha->host_no, blob->name));
		blob->fw = NULL;
		blob = NULL;
		goto out;
	}

out:
	mutex_unlock(&qla_fw_lock);
	return blob;
}
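
/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * A caller checks both the returned blob and blob->fw before using the
 * image; blob->fw is a standard struct firmware, so ->data and ->size
 * describe the loaded bytes. The snippet below is a fragment, not a
 * complete function.
 */
#if 0	/* example only */
	struct fw_blob *blob = qla2x00_request_firmware(vha);

	if (blob && blob->fw)
		qla_printk(KERN_INFO, vha->hw,
		    "Firmware image %s, %zu bytes.\n",
		    blob->name, blob->fw->size);
#endif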

static void
qla2x00_release_firmware(void)
{
	int idx;

	mutex_lock(&qla_fw_lock);
	for (idx = 0; idx < FW_BLOBS; idx++)
		if (qla_fw_blobs[idx].fw)
			release_firmware(qla_fw_blobs[idx].fw);
	mutex_unlock(&qla_fw_lock);
}

static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = vha->hw;

	DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
	    state));

	switch (state) {
	case pci_channel_io_normal:
		ha->flags.eeh_busy = 0;
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		ha->flags.eeh_busy = 1;
		qla2x00_free_irqs(vha);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ha->flags.pci_channel_io_perm_failure = 1;
		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	int risc_paused = 0;
	uint32_t stat;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		stat = RD_REG_DWORD(&reg->hccr);
		if (stat & HCCR_RISC_PAUSE)
			risc_paused = 1;
	} else if (IS_QLA23XX(ha)) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED)
			risc_paused = 1;
	} else if (IS_FWI2_CAPABLE(ha)) {
		stat = RD_REG_DWORD(&reg24->host_status);
		if (stat & HSRX_RISC_PAUSED)
			risc_paused = 1;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (risc_paused) {
		qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
		    "Dumping firmware!\n");
		ha->isp_ops->fw_dump(base_vha, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	} else
		return PCI_ERS_RESULT_RECOVERED;
}

static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	struct rsp_que *rsp;
	int rc, retries = 10;

	DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));

	/* Workaround: the driver touches the hardware before the error
	 * recovery core marks the channel normal again, so force the
	 * error state to pci_channel_io_normal here; otherwise mailbox
	 * commands time out.
	 */
	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);

	/* pci_restore_state() clears the device's saved_state flag, so
	 * save the state again to keep it available for a later restore.
	 */
	pci_save_state(pdev);

	if (ha->mem_only)
		rc = pci_enable_device_mem(pdev);
	else
		rc = pci_enable_device(pdev);

	if (rc) {
		qla_printk(KERN_WARNING, ha,
		    "Can't re-enable PCI device after reset.\n");
		return ret;
	}

	rsp = ha->rsp_q_map[0];
	if (qla2x00_request_irqs(ha, rsp))
		return ret;

	if (ha->isp_ops->pci_config(base_vha))
		return ret;

	while (ha->flags.mbox_busy && retries--)
		msleep(1000);

	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;
	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	DEBUG17(qla_printk(KERN_WARNING, ha,
	    "slot_reset-return:ret=%x\n", ret));

	return ret;
}

static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
	struct qla_hw_data *ha = base_vha->hw;
	int ret;

	DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));

	ret = qla2x00_wait_for_hba_online(base_vha);
	if (ret != QLA_SUCCESS) {
		qla_printk(KERN_ERR, ha,
		    "the device failed to resume I/O "
		    "from slot/link_reset");
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	ha->flags.eeh_busy = 0;
}

static struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
};

static struct pci_device_id qla2xxx_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);

static struct pci_driver qla2xxx_pci_driver = {
	.name		= QLA2XXX_DRIVER_NAME,
	.driver		= {
		.owner		= THIS_MODULE,
	},
	.id_table	= qla2xxx_pci_tbl,
	.probe		= qla2x00_probe_one,
	.remove		= qla2x00_remove_one,
	.err_handler	= &qla2xxx_err_handler,
};

static struct file_operations apidev_fops = {
	.owner = THIS_MODULE,
};

/**
 * qla2x00_module_init - Module initialization.
 **/
static int __init
qla2x00_module_init(void)
{
	int ret = 0;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
	    SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		    "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
		return -ENOMEM;
	}

	/* Derive version string. */
	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
	if (ql2xextended_error_logging)
		strcat(qla2x00_version_str, "-debug");

	qla2xxx_transport_template =
	    fc_attach_transport(&qla2xxx_transport_functions);
	if (!qla2xxx_transport_template) {
		kmem_cache_destroy(srb_cachep);
		return -ENODEV;
	}

	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
	if (apidev_major < 0) {
		printk(KERN_WARNING "qla2xxx: Unable to register char device "
		    "%s\n", QLA2XXX_APIDEV);
	}

	qla2xxx_transport_vport_template =
	    fc_attach_transport(&qla2xxx_transport_vport_functions);
	if (!qla2xxx_transport_vport_template) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		return -ENODEV;
	}

	printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
	    qla2x00_version_str);
	ret = pci_register_driver(&qla2xxx_pci_driver);
	if (ret) {
		kmem_cache_destroy(srb_cachep);
		fc_release_transport(qla2xxx_transport_template);
		fc_release_transport(qla2xxx_transport_vport_template);
	}
	return ret;
}

/**
 * qla2x00_module_exit - Module cleanup.
 **/
static void __exit
qla2x00_module_exit(void)
{
	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	kmem_cache_destroy(srb_cachep);
	if (ctx_cachep)
		kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_template);
	fc_release_transport(qla2xxx_transport_vport_template);
}

module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);