scsi: qla2xxx: Add FC-NVMe command handling
drivers/scsi/qla2xxx/qla_init.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
static int qla24xx_post_prli_work(struct scsi_qla_host *, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

void
qla2x00_sp_timeout(unsigned long __data)
{
	srb_t *sp = (srb_t *)__data;
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	req = vha->hw->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);
	sp->free(sp);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

void
qla2x00_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

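/*
 * Common timeout handler for asynchronous SRBs.  A timed-out PLOGI is fed
 * back through the PLOGI-done event path so the relogin logic can retry;
 * the remaining SRB types are simply completed with QLA_FUNCTION_TIMEOUT.
 */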
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
	    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
	    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

	fcport->flags &= ~FCF_ASYNC_SENT;

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		/* Retry as needed. */
		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
			QLA_LOGIO_LOGIN_RETRIED : 0;
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.sp = sp;
		qla24xx_handle_plogi_done_event(fcport->vha, &ea);
		break;
	case SRB_LOGOUT_CMD:
		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
		break;
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	}
}

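/* PLOGI SRB completion: forward the login status to the fcport state machine. */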
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

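/*
 * qla2x00_async_login() - issue an asynchronous PLOGI to @fcport.
 *
 * For ports advertising FC-NVMe support (fc4f_nvme) the firmware PRLI is
 * skipped here (SRB_LOGIN_SKIP_PRLI); a separate NVMe PRLI is issued later
 * through qla24xx_async_prli().
 */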
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_login_sp_done;
	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
		qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_logout_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

329static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
330 struct event_arg *ea)
331{
332 fc_port_t *fcport, *conflict_fcport;
333 struct get_name_list_extended *e;
334 u16 i, n, found = 0, loop_id;
335 port_id_t id;
336 u64 wwn;
a5d42f4c 337 u8 opt = 0, current_login_state;
726b8548
QT
338
339 fcport = ea->fcport;
340
341 if (ea->rc) { /* rval */
342 if (fcport->login_retry == 0) {
343 fcport->login_retry = vha->hw->login_retry_count;
83548fe2
QT
344 ql_dbg(ql_dbg_disc, vha, 0x20de,
345 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
346 fcport->port_name, fcport->login_retry);
726b8548
QT
347 }
348 return;
349 }
350
351 if (fcport->last_rscn_gen != fcport->rscn_gen) {
83548fe2 352 ql_dbg(ql_dbg_disc, vha, 0x20df,
726b8548
QT
353 "%s %8phC rscn gen changed rscn %d|%d \n",
354 __func__, fcport->port_name,
355 fcport->last_rscn_gen, fcport->rscn_gen);
356 qla24xx_post_gidpn_work(vha, fcport);
357 return;
358 } else if (fcport->last_login_gen != fcport->login_gen) {
83548fe2
QT
359 ql_dbg(ql_dbg_disc, vha, 0x20e0,
360 "%s %8phC login gen changed login %d|%d\n",
361 __func__, fcport->port_name,
362 fcport->last_login_gen, fcport->login_gen);
726b8548
QT
363 return;
364 }
365
366 n = ea->data[0] / sizeof(struct get_name_list_extended);
367
83548fe2 368 ql_dbg(ql_dbg_disc, vha, 0x20e1,
726b8548
QT
369 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
370 __func__, __LINE__, fcport->port_name, n,
371 fcport->d_id.b.domain, fcport->d_id.b.area,
372 fcport->d_id.b.al_pa, fcport->loop_id);
373
374 for (i = 0; i < n; i++) {
375 e = &vha->gnl.l[i];
376 wwn = wwn_to_u64(e->port_name);
377
378 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
379 continue;
380
381 found = 1;
382 id.b.domain = e->port_id[2];
383 id.b.area = e->port_id[1];
384 id.b.al_pa = e->port_id[0];
385 id.b.rsvd_1 = 0;
386
387 loop_id = le16_to_cpu(e->nport_handle);
388 loop_id = (loop_id & 0x7fff);
389
83548fe2
QT
390 ql_dbg(ql_dbg_disc, vha, 0x20e2,
391 "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
392 __func__, fcport->port_name,
393 e->current_login_state, fcport->fw_login_state,
394 id.b.domain, id.b.area, id.b.al_pa,
395 fcport->d_id.b.domain, fcport->d_id.b.area,
396 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
726b8548
QT
397
398 if ((id.b24 != fcport->d_id.b24) ||
399 ((fcport->loop_id != FC_NO_LOOP_ID) &&
400 (fcport->loop_id != loop_id))) {
83548fe2
QT
401 ql_dbg(ql_dbg_disc, vha, 0x20e3,
402 "%s %d %8phC post del sess\n",
403 __func__, __LINE__, fcport->port_name);
726b8548
QT
404 qlt_schedule_sess_for_deletion(fcport, 1);
405 return;
406 }
407
408 fcport->loop_id = loop_id;
409
410 wwn = wwn_to_u64(fcport->port_name);
411 qlt_find_sess_invalidate_other(vha, wwn,
412 id, loop_id, &conflict_fcport);
413
414 if (conflict_fcport) {
415 /*
416 * Another fcport shares the same loop_id and
417 * nport id. The conflicting fcport needs to finish
418 * cleanup before this fcport can proceed to login.
419 */
420 conflict_fcport->conflict = fcport;
421 fcport->login_pause = 1;
422 }
423
a5d42f4c
DG
424 if (fcport->fc4f_nvme)
425 current_login_state = e->current_login_state >> 4;
426 else
427 current_login_state = e->current_login_state & 0xf;
428
429 switch (current_login_state) {
726b8548 430 case DSC_LS_PRLI_COMP:
83548fe2
QT
431 ql_dbg(ql_dbg_disc, vha, 0x20e4,
432 "%s %d %8phC post gpdb\n",
433 __func__, __LINE__, fcport->port_name);
726b8548
QT
434 opt = PDO_FORCE_ADISC;
435 qla24xx_post_gpdb_work(vha, fcport, opt);
436 break;
726b8548
QT
437 case DSC_LS_PORT_UNAVAIL:
438 default:
439 if (fcport->loop_id == FC_NO_LOOP_ID) {
440 qla2x00_find_new_loop_id(vha, fcport);
441 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
442 }
83548fe2
QT
443 ql_dbg(ql_dbg_disc, vha, 0x20e5,
444 "%s %d %8phC\n",
445 __func__, __LINE__, fcport->port_name);
726b8548
QT
446 qla24xx_fcport_handle_login(vha, fcport);
447 break;
448 }
449 }
450
451 if (!found) {
452 /* fw has no record of this port */
453 if (fcport->loop_id == FC_NO_LOOP_ID) {
454 qla2x00_find_new_loop_id(vha, fcport);
455 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
456 } else {
457 for (i = 0; i < n; i++) {
458 e = &vha->gnl.l[i];
459 id.b.domain = e->port_id[0];
460 id.b.area = e->port_id[1];
461 id.b.al_pa = e->port_id[2];
462 id.b.rsvd_1 = 0;
463 loop_id = le16_to_cpu(e->nport_handle);
464
465 if (fcport->d_id.b24 == id.b24) {
466 conflict_fcport =
467 qla2x00_find_fcport_by_wwpn(vha,
468 e->port_name, 0);
469
83548fe2 470 ql_dbg(ql_dbg_disc, vha, 0x20e6,
726b8548
QT
471 "%s %d %8phC post del sess\n",
472 __func__, __LINE__,
473 conflict_fcport->port_name);
474 qlt_schedule_sess_for_deletion
475 (conflict_fcport, 1);
476 }
477
478 if (fcport->loop_id == loop_id) {
479 /* FW already picked this loop id for another fcport */
480 qla2x00_find_new_loop_id(vha, fcport);
481 }
482 }
483 }
484 qla24xx_fcport_handle_login(vha, fcport);
485 }
486} /* gnl_event */
487
488static void
25ff6af1 489qla24xx_async_gnl_sp_done(void *s, int res)
726b8548 490{
25ff6af1
JC
491 struct srb *sp = s;
492 struct scsi_qla_host *vha = sp->vha;
726b8548
QT
493 unsigned long flags;
494 struct fc_port *fcport = NULL, *tf;
495 u16 i, n = 0, loop_id;
496 struct event_arg ea;
497 struct get_name_list_extended *e;
498 u64 wwn;
499 struct list_head h;
500
83548fe2 501 ql_dbg(ql_dbg_disc, vha, 0x20e7,
726b8548
QT
502 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
503 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
504 sp->u.iocb_cmd.u.mbx.in_mb[2]);
505
506 memset(&ea, 0, sizeof(ea));
507 ea.sp = sp;
508 ea.rc = res;
509 ea.event = FCME_GNL_DONE;
510
511 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
512 sizeof(struct get_name_list_extended)) {
513 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
514 sizeof(struct get_name_list_extended);
515 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
516 }
517
518 for (i = 0; i < n; i++) {
519 e = &vha->gnl.l[i];
520 loop_id = le16_to_cpu(e->nport_handle);
521 /* mask out reserve bit */
522 loop_id = (loop_id & 0x7fff);
523 set_bit(loop_id, vha->hw->loop_id_map);
524 wwn = wwn_to_u64(e->port_name);
525
83548fe2 526 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
726b8548
QT
527 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
528 __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
529 e->port_id[0], e->current_login_state, e->last_login_state,
530 (loop_id & 0x7fff));
531 }
532
533 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
534 vha->gnl.sent = 0;
535
536 INIT_LIST_HEAD(&h);
537 fcport = tf = NULL;
538 if (!list_empty(&vha->gnl.fcports))
539 list_splice_init(&vha->gnl.fcports, &h);
540
541 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
542 list_del_init(&fcport->gnl_entry);
543 fcport->flags &= ~FCF_ASYNC_SENT;
544 ea.fcport = fcport;
545
546 qla2x00_fcport_event_handler(vha, &ea);
547 }
548
549 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
550
25ff6af1 551 sp->free(sp);
726b8548
QT
552}
553
554int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
555{
556 srb_t *sp;
557 struct srb_iocb *mbx;
558 int rval = QLA_FUNCTION_FAILED;
559 unsigned long flags;
560 u16 *mb;
561
562 if (!vha->flags.online)
563 goto done;
564
83548fe2 565 ql_dbg(ql_dbg_disc, vha, 0x20d9,
726b8548
QT
566 "Async-gnlist WWPN %8phC \n", fcport->port_name);
567
568 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
569 fcport->flags |= FCF_ASYNC_SENT;
570 fcport->disc_state = DSC_GNL;
571 fcport->last_rscn_gen = fcport->rscn_gen;
572 fcport->last_login_gen = fcport->login_gen;
573
574 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
575 if (vha->gnl.sent) {
576 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
577 rval = QLA_SUCCESS;
578 goto done;
579 }
580 vha->gnl.sent = 1;
581 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
582
583 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
584 if (!sp)
585 goto done;
586 sp->type = SRB_MB_IOCB;
587 sp->name = "gnlist";
588 sp->gen1 = fcport->rscn_gen;
589 sp->gen2 = fcport->login_gen;
590
591 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
592
593 mb = sp->u.iocb_cmd.u.mbx.out_mb;
594 mb[0] = MBC_PORT_NODE_NAME_LIST;
595 mb[1] = BIT_2 | BIT_3;
596 mb[2] = MSW(vha->gnl.ldma);
597 mb[3] = LSW(vha->gnl.ldma);
598 mb[6] = MSW(MSD(vha->gnl.ldma));
599 mb[7] = LSW(MSD(vha->gnl.ldma));
600 mb[8] = vha->gnl.size;
601 mb[9] = vha->vp_idx;
602
603 mbx = &sp->u.iocb_cmd;
604 mbx->timeout = qla2x00_async_iocb_timeout;
605
606 sp->done = qla24xx_async_gnl_sp_done;
607
608 rval = qla2x00_start_sp(sp);
609 if (rval != QLA_SUCCESS)
610 goto done_free_sp;
611
83548fe2
QT
612 ql_dbg(ql_dbg_disc, vha, 0x20da,
613 "Async-%s - OUT WWPN %8phC hndl %x\n",
614 sp->name, fcport->port_name, sp->handle);
726b8548
QT
615
616 return rval;
617
618done_free_sp:
25ff6af1 619 sp->free(sp);
726b8548
QT
620done:
621 fcport->flags &= ~FCF_ASYNC_SENT;
622 return rval;
623}
624
625int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
626{
627 struct qla_work_evt *e;
628
629 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
630 if (!e)
631 return QLA_FUNCTION_FAILED;
632
633 e->u.fcport.fcport = fcport;
634 return qla2x00_post_work(vha, e);
635}
636
637static
25ff6af1 638void qla24xx_async_gpdb_sp_done(void *s, int res)
726b8548 639{
25ff6af1
JC
640 struct srb *sp = s;
641 struct scsi_qla_host *vha = sp->vha;
726b8548 642 struct qla_hw_data *ha = vha->hw;
726b8548
QT
643 struct port_database_24xx *pd;
644 fc_port_t *fcport = sp->fcport;
645 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
646 int rval = QLA_SUCCESS;
647 struct event_arg ea;
648
83548fe2 649 ql_dbg(ql_dbg_disc, vha, 0x20db,
726b8548
QT
650 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
651 sp->name, res, fcport->port_name, mb[1], mb[2]);
652
653 fcport->flags &= ~FCF_ASYNC_SENT;
654
655 if (res) {
656 rval = res;
657 goto gpd_error_out;
658 }
659
660 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
661
15f30a57 662 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
726b8548
QT
663
664gpd_error_out:
665 memset(&ea, 0, sizeof(ea));
666 ea.event = FCME_GPDB_DONE;
667 ea.rc = rval;
668 ea.fcport = fcport;
669 ea.sp = sp;
670
671 qla2x00_fcport_event_handler(vha, &ea);
672
673 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
674 sp->u.iocb_cmd.u.mbx.in_dma);
675
25ff6af1 676 sp->free(sp);
726b8548
QT
677}
678
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

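/*
 * qla24xx_async_prli() - issue an asynchronous PRLI to @fcport.
 *
 * When the port supports FC-NVMe (fc4f_nvme), SRB_LOGIN_NVME_PRLI is set so
 * an NVMe PRLI is requested instead of the default FCP PRLI.
 */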
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags &= ~FCF_ASYNC_SENT;
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

726b8548
QT
777static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
778 u8 opt)
779{
780 struct qla_work_evt *e;
781
782 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
783 if (!e)
784 return QLA_FUNCTION_FAILED;
785
786 e->u.fcport.fcport = fcport;
787 e->u.fcport.opt = opt;
788 return qla2x00_post_work(vha, e);
789}
790
791int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
792{
793 srb_t *sp;
794 struct srb_iocb *mbx;
795 int rval = QLA_FUNCTION_FAILED;
796 u16 *mb;
797 dma_addr_t pd_dma;
798 struct port_database_24xx *pd;
799 struct qla_hw_data *ha = vha->hw;
800
801 if (!vha->flags.online)
802 goto done;
803
804 fcport->flags |= FCF_ASYNC_SENT;
805 fcport->disc_state = DSC_GPDB;
806
807 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
808 if (!sp)
809 goto done;
810
811 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
812 if (pd == NULL) {
83548fe2
QT
813 ql_log(ql_log_warn, vha, 0xd043,
814 "Failed to allocate port database structure.\n");
726b8548
QT
815 goto done_free_sp;
816 }
817 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
818
819 sp->type = SRB_MB_IOCB;
820 sp->name = "gpdb";
821 sp->gen1 = fcport->rscn_gen;
822 sp->gen2 = fcport->login_gen;
823 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
824
825 mb = sp->u.iocb_cmd.u.mbx.out_mb;
826 mb[0] = MBC_GET_PORT_DATABASE;
827 mb[1] = fcport->loop_id;
828 mb[2] = MSW(pd_dma);
829 mb[3] = LSW(pd_dma);
830 mb[6] = MSW(MSD(pd_dma));
831 mb[7] = LSW(MSD(pd_dma));
832 mb[9] = vha->vp_idx;
833 mb[10] = opt;
834
835 mbx = &sp->u.iocb_cmd;
836 mbx->timeout = qla2x00_async_iocb_timeout;
837 mbx->u.mbx.in = (void *)pd;
838 mbx->u.mbx.in_dma = pd_dma;
839
840 sp->done = qla24xx_async_gpdb_sp_done;
841
842 rval = qla2x00_start_sp(sp);
843 if (rval != QLA_SUCCESS)
844 goto done_free_sp;
845
83548fe2
QT
846 ql_dbg(ql_dbg_disc, vha, 0x20dc,
847 "Async-%s %8phC hndl %x opt %x\n",
848 sp->name, fcport->port_name, sp->handle, opt);
726b8548
QT
849
850 return rval;
851
852done_free_sp:
853 if (pd)
854 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
855
25ff6af1 856 sp->free(sp);
726b8548
QT
857done:
858 fcport->flags &= ~FCF_ASYNC_SENT;
859 qla24xx_post_gpdb_work(vha, fcport, opt);
5ff1d584
AV
860 return rval;
861}
862
726b8548
QT
863static
864void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
865{
866 int rval = ea->rc;
867 fc_port_t *fcport = ea->fcport;
868 unsigned long flags;
869
870 fcport->flags &= ~FCF_ASYNC_SENT;
871
83548fe2 872 ql_dbg(ql_dbg_disc, vha, 0x20d2,
726b8548
QT
873 "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
874 fcport->disc_state, fcport->fw_login_state, rval);
875
876 if (ea->sp->gen2 != fcport->login_gen) {
877 /* target side must have changed it. */
83548fe2 878 ql_dbg(ql_dbg_disc, vha, 0x20d3,
726b8548
QT
879 "%s %8phC generation changed rscn %d|%d login %d|%d \n",
880 __func__, fcport->port_name, fcport->last_rscn_gen,
881 fcport->rscn_gen, fcport->last_login_gen,
882 fcport->login_gen);
883 return;
884 } else if (ea->sp->gen1 != fcport->rscn_gen) {
83548fe2 885 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
726b8548
QT
886 __func__, __LINE__, fcport->port_name);
887 qla24xx_post_gidpn_work(vha, fcport);
888 return;
889 }
890
891 if (rval != QLA_SUCCESS) {
83548fe2 892 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
726b8548
QT
893 __func__, __LINE__, fcport->port_name);
894 qlt_schedule_sess_for_deletion_lock(fcport);
895 return;
896 }
897
898 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
899 ea->fcport->login_gen++;
900 ea->fcport->deleted = 0;
901 ea->fcport->logout_on_delete = 1;
902
903 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
904 vha->fcport_count++;
905 ea->fcport->login_succ = 1;
906
907 if (!IS_IIDMA_CAPABLE(vha->hw) ||
908 !vha->hw->flags.gpsc_supported) {
83548fe2 909 ql_dbg(ql_dbg_disc, vha, 0x20d6,
726b8548
QT
910 "%s %d %8phC post upd_fcport fcp_cnt %d\n",
911 __func__, __LINE__, fcport->port_name,
912 vha->fcport_count);
913
914 qla24xx_post_upd_fcport_work(vha, fcport);
915 } else {
83548fe2 916 ql_dbg(ql_dbg_disc, vha, 0x20d7,
726b8548
QT
917 "%s %d %8phC post gpsc fcp_cnt %d\n",
918 __func__, __LINE__, fcport->port_name,
919 vha->fcport_count);
920
921 qla24xx_post_gpsc_work(vha, fcport);
922 }
923 }
924 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
925} /* gpdb event */
926
927int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
928{
929 if (fcport->login_retry == 0)
930 return 0;
931
932 if (fcport->scan_state != QLA_FCPORT_FOUND)
933 return 0;
934
83548fe2 935 ql_dbg(ql_dbg_disc, vha, 0x20d8,
726b8548
QT
936 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
937 __func__, fcport->port_name, fcport->disc_state,
938 fcport->fw_login_state, fcport->login_pause, fcport->flags,
939 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
940 fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
941 fcport->loop_id);
942
943 fcport->login_retry--;
944
945 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
726b8548
QT
946 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
947 return 0;
948
5b33469a
QT
949 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
950 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
951 return 0;
952 }
953
726b8548
QT
954 /* for pure Target Mode. Login will not be initiated */
955 if (vha->host->active_mode == MODE_TARGET)
956 return 0;
957
958 if (fcport->flags & FCF_ASYNC_SENT) {
959 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
960 return 0;
961 }
962
963 switch (fcport->disc_state) {
964 case DSC_DELETED:
965 if (fcport->loop_id == FC_NO_LOOP_ID) {
83548fe2
QT
966 ql_dbg(ql_dbg_disc, vha, 0x20bd,
967 "%s %d %8phC post gnl\n",
968 __func__, __LINE__, fcport->port_name);
726b8548
QT
969 qla24xx_async_gnl(vha, fcport);
970 } else {
83548fe2
QT
971 ql_dbg(ql_dbg_disc, vha, 0x20bf,
972 "%s %d %8phC post login\n",
973 __func__, __LINE__, fcport->port_name);
726b8548
QT
974 fcport->disc_state = DSC_LOGIN_PEND;
975 qla2x00_post_async_login_work(vha, fcport, NULL);
976 }
977 break;
978
979 case DSC_GNL:
980 if (fcport->login_pause) {
981 fcport->last_rscn_gen = fcport->rscn_gen;
982 fcport->last_login_gen = fcport->login_gen;
983 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
984 break;
985 }
986
987 if (fcport->flags & FCF_FCP2_DEVICE) {
988 u8 opt = PDO_FORCE_ADISC;
989
83548fe2
QT
990 ql_dbg(ql_dbg_disc, vha, 0x20c9,
991 "%s %d %8phC post gpdb\n",
992 __func__, __LINE__, fcport->port_name);
726b8548
QT
993
994 fcport->disc_state = DSC_GPDB;
995 qla24xx_post_gpdb_work(vha, fcport, opt);
996 } else {
83548fe2
QT
997 ql_dbg(ql_dbg_disc, vha, 0x20cf,
998 "%s %d %8phC post login\n",
999 __func__, __LINE__, fcport->port_name);
726b8548
QT
1000 fcport->disc_state = DSC_LOGIN_PEND;
1001 qla2x00_post_async_login_work(vha, fcport, NULL);
1002 }
1003
1004 break;
1005
1006 case DSC_LOGIN_FAILED:
83548fe2
QT
1007 ql_dbg(ql_dbg_disc, vha, 0x20d0,
1008 "%s %d %8phC post gidpn\n",
1009 __func__, __LINE__, fcport->port_name);
726b8548
QT
1010
1011 qla24xx_post_gidpn_work(vha, fcport);
1012 break;
1013
1014 case DSC_LOGIN_COMPLETE:
1015 /* recheck login state */
83548fe2
QT
1016 ql_dbg(ql_dbg_disc, vha, 0x20d1,
1017 "%s %d %8phC post gpdb\n",
1018 __func__, __LINE__, fcport->port_name);
726b8548
QT
1019
1020 qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
1021 break;
1022
1023 default:
1024 break;
1025 }
1026
1027 return 0;
1028}
1029
1030static
1031void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
1032{
1033 fcport->rscn_gen++;
1034
83548fe2
QT
1035 ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
1036 "%s %8phC DS %d LS %d\n",
1037 __func__, fcport->port_name, fcport->disc_state,
1038 fcport->fw_login_state);
726b8548
QT
1039
1040 if (fcport->flags & FCF_ASYNC_SENT)
1041 return;
1042
1043 switch (fcport->disc_state) {
1044 case DSC_DELETED:
1045 case DSC_LOGIN_COMPLETE:
1046 qla24xx_post_gidpn_work(fcport->vha, fcport);
1047 break;
1048
1049 default:
1050 break;
1051 }
1052}
1053
1054int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1055 u8 *port_name, void *pla)
1056{
1057 struct qla_work_evt *e;
1058 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1059 if (!e)
1060 return QLA_FUNCTION_FAILED;
1061
1062 e->u.new_sess.id = *id;
1063 e->u.new_sess.pla = pla;
1064 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1065
1066 return qla2x00_post_work(vha, e);
1067}
1068
1069static
1070int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
1071 struct event_arg *ea)
1072{
1073 fc_port_t *fcport = ea->fcport;
1074
1075 if (test_bit(UNLOADING, &vha->dpc_flags))
1076 return 0;
1077
1078 switch (vha->host->active_mode) {
1079 case MODE_INITIATOR:
1080 case MODE_DUAL:
1081 if (fcport->scan_state == QLA_FCPORT_FOUND)
1082 qla24xx_fcport_handle_login(vha, fcport);
1083 break;
1084
1085 case MODE_TARGET:
1086 default:
1087 /* no-op */
1088 break;
1089 }
1090
1091 return 0;
1092}
1093
1094static
1095void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1096 struct event_arg *ea)
1097{
1098 fc_port_t *fcport = ea->fcport;
1099
1100 if (fcport->scan_state != QLA_FCPORT_FOUND) {
1101 fcport->login_retry++;
1102 return;
1103 }
1104
83548fe2
QT
1105 ql_dbg(ql_dbg_disc, vha, 0x2102,
1106 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1107 __func__, fcport->port_name, fcport->disc_state,
1108 fcport->fw_login_state, fcport->login_pause,
1109 fcport->deleted, fcport->conflict,
1110 fcport->last_rscn_gen, fcport->rscn_gen,
1111 fcport->last_login_gen, fcport->login_gen,
1112 fcport->flags);
726b8548
QT
1113
1114 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
726b8548
QT
1115 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
1116 return;
1117
5b33469a
QT
1118 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1119 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
1120 return;
1121 }
1122
726b8548
QT
1123 if (fcport->flags & FCF_ASYNC_SENT) {
1124 fcport->login_retry++;
1125 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1126 return;
1127 }
1128
1129 if (fcport->disc_state == DSC_DELETE_PEND) {
1130 fcport->login_retry++;
1131 return;
1132 }
1133
1134 if (fcport->last_rscn_gen != fcport->rscn_gen) {
83548fe2 1135 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
726b8548
QT
1136 __func__, __LINE__, fcport->port_name);
1137
1138 qla24xx_async_gidpn(vha, fcport);
1139 return;
1140 }
1141
1142 qla24xx_fcport_handle_login(vha, fcport);
1143}
1144
41dc529a 1145void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
726b8548 1146{
41dc529a
QT
1147 fc_port_t *fcport, *f, *tf;
1148 uint32_t id = 0, mask, rid;
726b8548
QT
1149 int rc;
1150
b98ae0d7
QT
1151 switch (ea->event) {
1152 case FCME_RELOGIN:
1153 case FCME_RSCN:
1154 case FCME_GIDPN_DONE:
1155 case FCME_GPSC_DONE:
1156 case FCME_GPNID_DONE:
1157 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
1158 test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
1159 return;
1160 break;
1161 default:
1162 break;
1163 }
1164
726b8548
QT
1165 switch (ea->event) {
1166 case FCME_RELOGIN:
1167 if (test_bit(UNLOADING, &vha->dpc_flags))
1168 return;
5ff1d584 1169
726b8548
QT
1170 qla24xx_handle_relogin_event(vha, ea);
1171 break;
1172 case FCME_RSCN:
1173 if (test_bit(UNLOADING, &vha->dpc_flags))
1174 return;
41dc529a
QT
1175 switch (ea->id.b.rsvd_1) {
1176 case RSCN_PORT_ADDR:
1177 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1178 if (!fcport) {
1179 /* cable moved */
1180 rc = qla24xx_post_gpnid_work(vha, &ea->id);
1181 if (rc) {
83548fe2
QT
1182 ql_log(ql_log_warn, vha, 0xd044,
1183 "RSCN GPNID work failed %02x%02x%02x\n",
1184 ea->id.b.domain, ea->id.b.area,
1185 ea->id.b.al_pa);
41dc529a
QT
1186 }
1187 } else {
1188 ea->fcport = fcport;
1189 qla24xx_handle_rscn_event(fcport, ea);
1190 }
1191 break;
1192 case RSCN_AREA_ADDR:
1193 case RSCN_DOM_ADDR:
1194 if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
1195 mask = 0xffff00;
83548fe2
QT
1196 ql_dbg(ql_dbg_async, vha, 0x5044,
1197 "RSCN: Area 0x%06x was affected\n",
1198 ea->id.b24);
41dc529a
QT
1199 } else {
1200 mask = 0xff0000;
83548fe2
QT
1201 ql_dbg(ql_dbg_async, vha, 0x507a,
1202 "RSCN: Domain 0x%06x was affected\n",
1203 ea->id.b24);
41dc529a 1204 }
726b8548 1205
41dc529a
QT
1206 rid = ea->id.b24 & mask;
1207 list_for_each_entry_safe(f, tf, &vha->vp_fcports,
1208 list) {
1209 id = f->d_id.b24 & mask;
1210 if (rid == id) {
1211 ea->fcport = f;
1212 qla24xx_handle_rscn_event(f, ea);
1213 }
726b8548 1214 }
41dc529a
QT
1215 break;
1216 case RSCN_FAB_ADDR:
1217 default:
83548fe2
QT
1218 ql_log(ql_log_warn, vha, 0xd045,
1219 "RSCN: Fabric was affected. Addr format %d\n",
1220 ea->id.b.rsvd_1);
41dc529a
QT
1221 qla2x00_mark_all_devices_lost(vha, 1);
1222 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1223 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
726b8548
QT
1224 }
1225 break;
1226 case FCME_GIDPN_DONE:
1227 qla24xx_handle_gidpn_event(vha, ea);
1228 break;
1229 case FCME_GNL_DONE:
1230 qla24xx_handle_gnl_done_event(vha, ea);
1231 break;
1232 case FCME_GPSC_DONE:
1233 qla24xx_post_upd_fcport_work(vha, ea->fcport);
1234 break;
1235 case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
1236 qla24xx_handle_plogi_done_event(vha, ea);
1237 break;
a5d42f4c
DG
1238 case FCME_PRLI_DONE:
1239 qla24xx_handle_prli_done_event(vha, ea);
1240 break;
726b8548
QT
1241 case FCME_GPDB_DONE:
1242 qla24xx_handle_gpdb_event(vha, ea);
1243 break;
1244 case FCME_GPNID_DONE:
1245 qla24xx_handle_gpnid_event(vha, ea);
1246 break;
1247 case FCME_DELETE_DONE:
1248 qla24xx_handle_delete_done_event(vha, ea);
1249 break;
1250 default:
1251 BUG_ON(1);
1252 break;
1253 }
5ff1d584
AV
1254}
1255
3822263e 1256static void
faef62d1 1257qla2x00_tmf_iocb_timeout(void *data)
3822263e 1258{
25ff6af1 1259 srb_t *sp = data;
faef62d1 1260 struct srb_iocb *tmf = &sp->u.iocb_cmd;
3822263e 1261
faef62d1
AB
1262 tmf->u.tmf.comp_status = CS_TIMEOUT;
1263 complete(&tmf->u.tmf.comp);
1264}
9ba56b95 1265
faef62d1 1266static void
25ff6af1 1267qla2x00_tmf_sp_done(void *ptr, int res)
faef62d1 1268{
25ff6af1 1269 srb_t *sp = ptr;
faef62d1 1270 struct srb_iocb *tmf = &sp->u.iocb_cmd;
25ff6af1 1271
faef62d1 1272 complete(&tmf->u.tmf.comp);
3822263e
MI
1273}
1274
1275int
faef62d1 1276qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
3822263e
MI
1277 uint32_t tag)
1278{
1279 struct scsi_qla_host *vha = fcport->vha;
faef62d1 1280 struct srb_iocb *tm_iocb;
3822263e 1281 srb_t *sp;
faef62d1 1282 int rval = QLA_FUNCTION_FAILED;
3822263e 1283
9ba56b95 1284 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3822263e
MI
1285 if (!sp)
1286 goto done;
1287
faef62d1 1288 tm_iocb = &sp->u.iocb_cmd;
9ba56b95
GM
1289 sp->type = SRB_TM_CMD;
1290 sp->name = "tmf";
faef62d1
AB
1291 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1292 tm_iocb->u.tmf.flags = flags;
1293 tm_iocb->u.tmf.lun = lun;
1294 tm_iocb->u.tmf.data = tag;
1295 sp->done = qla2x00_tmf_sp_done;
1296 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
1297 init_completion(&tm_iocb->u.tmf.comp);
3822263e
MI
1298
1299 rval = qla2x00_start_sp(sp);
1300 if (rval != QLA_SUCCESS)
1301 goto done_free_sp;
1302
7c3df132 1303 ql_dbg(ql_dbg_taskm, vha, 0x802f,
cfb0919c
CD
1304 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1305 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1306 fcport->d_id.b.area, fcport->d_id.b.al_pa);
faef62d1
AB
1307
1308 wait_for_completion(&tm_iocb->u.tmf.comp);
1309
1310 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
1311 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1312
1313 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
1314 ql_dbg(ql_dbg_taskm, vha, 0x8030,
1315 "TM IOCB failed (%x).\n", rval);
1316 }
1317
1318 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
1319 flags = tm_iocb->u.tmf.flags;
1320 lun = (uint16_t)tm_iocb->u.tmf.lun;
1321
1322 /* Issue Marker IOCB */
1323 qla2x00_marker(vha, vha->hw->req_q_map[0],
1324 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
1325 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1326 }
3822263e
MI
1327
1328done_free_sp:
25ff6af1 1329 sp->free(sp);
3822263e
MI
1330done:
1331 return rval;
1332}
1333
4440e46d
AB
1334static void
1335qla24xx_abort_iocb_timeout(void *data)
1336{
25ff6af1 1337 srb_t *sp = data;
4440e46d
AB
1338 struct srb_iocb *abt = &sp->u.iocb_cmd;
1339
1340 abt->u.abt.comp_status = CS_TIMEOUT;
1341 complete(&abt->u.abt.comp);
1342}
1343
1344static void
25ff6af1 1345qla24xx_abort_sp_done(void *ptr, int res)
4440e46d 1346{
25ff6af1 1347 srb_t *sp = ptr;
4440e46d
AB
1348 struct srb_iocb *abt = &sp->u.iocb_cmd;
1349
1350 complete(&abt->u.abt.comp);
1351}
1352
15f30a57 1353int
4440e46d
AB
1354qla24xx_async_abort_cmd(srb_t *cmd_sp)
1355{
25ff6af1 1356 scsi_qla_host_t *vha = cmd_sp->vha;
4440e46d
AB
1357 fc_port_t *fcport = cmd_sp->fcport;
1358 struct srb_iocb *abt_iocb;
1359 srb_t *sp;
1360 int rval = QLA_FUNCTION_FAILED;
1361
1362 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1363 if (!sp)
1364 goto done;
1365
1366 abt_iocb = &sp->u.iocb_cmd;
1367 sp->type = SRB_ABT_CMD;
1368 sp->name = "abort";
1369 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1370 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
1371 sp->done = qla24xx_abort_sp_done;
1372 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
1373 init_completion(&abt_iocb->u.abt.comp);
1374
1375 rval = qla2x00_start_sp(sp);
1376 if (rval != QLA_SUCCESS)
1377 goto done_free_sp;
1378
1379 ql_dbg(ql_dbg_async, vha, 0x507c,
1380 "Abort command issued - hdl=%x, target_id=%x\n",
1381 cmd_sp->handle, fcport->tgt_id);
1382
1383 wait_for_completion(&abt_iocb->u.abt.comp);
1384
1385 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
1386 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1387
1388done_free_sp:
25ff6af1 1389 sp->free(sp);
4440e46d
AB
1390done:
1391 return rval;
1392}
1393
1394int
1395qla24xx_async_abort_command(srb_t *sp)
1396{
1397 unsigned long flags = 0;
1398
1399 uint32_t handle;
1400 fc_port_t *fcport = sp->fcport;
1401 struct scsi_qla_host *vha = fcport->vha;
1402 struct qla_hw_data *ha = vha->hw;
1403 struct req_que *req = vha->req;
1404
1405 spin_lock_irqsave(&ha->hardware_lock, flags);
1406 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1407 if (req->outstanding_cmds[handle] == sp)
1408 break;
1409 }
1410 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1411 if (handle == req->num_outstanding_cmds) {
1412 /* Command not found. */
1413 return QLA_FUNCTION_FAILED;
1414 }
1415 if (sp->type == SRB_FXIOCB_DCMD)
1416 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1417 FXDISC_ABORT_IOCTL);
1418
1419 return qla24xx_async_abort_cmd(sp);
1420}
1421
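/*
 * PRLI completion: on MBS_COMMAND_COMPLETE fetch the port database (GPDB)
 * to confirm the session state; any other completion status is only logged.
 */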
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		ql_dbg(ql_dbg_disc, vha, 0x2119,
		    "%s %d %8phC unhandled event %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}

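/*
 * PLOGI completion: FC-NVMe capable ports (fc4f_nvme) continue with an NVMe
 * PRLI via qla24xx_post_prli_work(); all other ports proceed directly to
 * GPDB to validate the login state.
 */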
static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
			    "%s %d %8phC post prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC post gpdb\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area = (ea->iop[1] >> 8) & 0xff;
		cid.b.al_pa = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC LoopID 0x%x in use post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id);

		if (IS_SW_RESV_ADDR(cid)) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			ea->fcport->loop_id = FC_NO_LOOP_ID;
		} else {
			qla2x00_clear_loop_id(ea->fcport);
		}
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		ql_dbg(ql_dbg_disc, vha, 0x20ed,
		    "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
		    ea->fcport->d_id.b.al_pa);

		qla2x00_clear_loop_id(ea->fcport);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		break;
	}
	return;
}

4916392b 1514void
ac280b67
AV
1515qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1516 uint16_t *data)
1517{
726b8548 1518 qla2x00_mark_device_lost(vha, fcport, 1, 0);
a6ca8878 1519 qlt_logo_completion_handler(fcport, data[0]);
726b8548 1520 fcport->login_gen++;
4916392b 1521 return;
ac280b67
AV
1522}
1523
4916392b 1524void
5ff1d584
AV
1525qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1526 uint16_t *data)
1527{
1528 if (data[0] == MBS_COMMAND_COMPLETE) {
1529 qla2x00_update_fcport(vha, fcport);
1530
4916392b 1531 return;
5ff1d584
AV
1532 }
1533
1534 /* Retry login. */
1535 fcport->flags &= ~FCF_ASYNC_SENT;
1536 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1537 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1538 else
80d79440 1539 qla2x00_mark_device_lost(vha, fcport, 1, 0);
5ff1d584 1540
4916392b 1541 return;
5ff1d584
AV
1542}
1543
1da177e4
LT
1544/****************************************************************************/
1545/* QLogic ISP2x00 Hardware Support Functions. */
1546/****************************************************************************/
1547
fa492630 1548static int
7d613ac6
SV
1549qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
1550{
1551 int rval = QLA_SUCCESS;
1552 struct qla_hw_data *ha = vha->hw;
1553 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 1554 uint16_t config[4];
7d613ac6
SV
1555
1556 qla83xx_idc_lock(vha, 0);
1557
1558 /* SV: TODO: Assign initialization timeout from
1559 * flash-info / other param
1560 */
1561 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
1562 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
1563
1564 /* Set our fcoe function presence */
1565 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
1566 ql_dbg(ql_dbg_p3p, vha, 0xb077,
1567 "Error while setting DRV-Presence.\n");
1568 rval = QLA_FUNCTION_FAILED;
1569 goto exit;
1570 }
1571
1572 /* Decide the reset ownership */
1573 qla83xx_reset_ownership(vha);
1574
1575 /*
1576 * On first protocol driver load:
1577 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
1578 * register.
1579 * Others: Check compatibility with current IDC Major version.
1580 */
1581 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
1582 if (ha->flags.nic_core_reset_owner) {
1583 /* Set IDC Major version */
1584 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
1585 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
1586
1587 /* Clearing IDC-Lock-Recovery register */
1588 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
1589 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
1590 /*
1591 * Clear further IDC participation if we are not compatible with
1592 * the current IDC Major Version.
1593 */
1594 ql_log(ql_log_warn, vha, 0xb07d,
1595 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
1596 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
1597 __qla83xx_clear_drv_presence(vha);
1598 rval = QLA_FUNCTION_FAILED;
1599 goto exit;
1600 }
1601 /* Each function sets its supported Minor version. */
1602 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
1603 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
1604 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
1605
711aa7f7
SK
1606 if (ha->flags.nic_core_reset_owner) {
1607 memset(config, 0, sizeof(config));
1608 if (!qla81xx_get_port_config(vha, config))
1609 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
1610 QLA8XXX_DEV_READY);
1611 }
1612
7d613ac6
SV
1613 rval = qla83xx_idc_state_handler(vha);
1614
1615exit:
1616 qla83xx_idc_unlock(vha, 0);
1617
1618 return rval;
1619}
1620
1da177e4
LT
1621/*
1622* qla2x00_initialize_adapter
1623* Initialize board.
1624*
1625* Input:
1626* ha = adapter block pointer.
1627*
1628* Returns:
1629* 0 = success
1630*/
1631int
e315cd28 1632qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
1633{
1634 int rval;
e315cd28 1635 struct qla_hw_data *ha = vha->hw;
73208dfd 1636 struct req_que *req = ha->req_q_map[0];
2533cf67 1637
fc90adaf
JC
1638 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
1639 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1640
1da177e4 1641 /* Clear adapter flags. */
e315cd28 1642 vha->flags.online = 0;
2533cf67 1643 ha->flags.chip_reset_done = 0;
e315cd28 1644 vha->flags.reset_active = 0;
85880801
AV
1645 ha->flags.pci_channel_io_perm_failure = 0;
1646 ha->flags.eeh_busy = 0;
fabbb8df 1647 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
e315cd28
AC
1648 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1649 atomic_set(&vha->loop_state, LOOP_DOWN);
1650 vha->device_flags = DFLG_NO_CABLE;
1651 vha->dpc_flags = 0;
1652 vha->flags.management_server_logged_in = 0;
1653 vha->marker_needed = 0;
1da177e4
LT
1654 ha->isp_abort_cnt = 0;
1655 ha->beacon_blink_led = 0;
1656
73208dfd
AC
1657 set_bit(0, ha->req_qid_map);
1658 set_bit(0, ha->rsp_qid_map);
1659
cfb0919c 1660 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 1661 "Configuring PCI space...\n");
e315cd28 1662 rval = ha->isp_ops->pci_config(vha);
1da177e4 1663 if (rval) {
7c3df132
SK
1664 ql_log(ql_log_warn, vha, 0x0044,
1665 "Unable to configure PCI space.\n");
1da177e4
LT
1666 return (rval);
1667 }
1668
e315cd28 1669 ha->isp_ops->reset_chip(vha);
1da177e4 1670
e315cd28 1671 rval = qla2xxx_get_flash_info(vha);
c00d8994 1672 if (rval) {
7c3df132
SK
1673 ql_log(ql_log_fatal, vha, 0x004f,
1674 "Unable to validate FLASH data.\n");
7ec0effd
AD
1675 return rval;
1676 }
1677
1678 if (IS_QLA8044(ha)) {
1679 qla8044_read_reset_template(vha);
1680
1681 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
1682 * If DONRESET_BIT0 is set, drivers should not set dev_state
1683 * to NEED_RESET. But if NEED_RESET is set, drivers should
1684 * should honor the reset. */
1685 if (ql2xdontresethba == 1)
1686 qla8044_set_idc_dontreset(vha);
c00d8994
AV
1687 }
1688
73208dfd 1689 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 1690 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 1691 "Configure NVRAM parameters...\n");
0107109e 1692
e315cd28 1693 ha->isp_ops->nvram_config(vha);
1da177e4 1694
d4c760c2
AV
1695 if (ha->flags.disable_serdes) {
1696 /* Mask HBA via NVRAM settings? */
7c3df132 1697 ql_log(ql_log_info, vha, 0x0077,
7b833558 1698 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
d4c760c2
AV
1699 return QLA_FUNCTION_FAILED;
1700 }
1701
cfb0919c 1702 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 1703 "Verifying loaded RISC code...\n");
1da177e4 1704
e315cd28
AC
1705 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
1706 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
1707 if (rval)
1708 return (rval);
e315cd28 1709 rval = qla2x00_setup_chip(vha);
d19044c3
AV
1710 if (rval)
1711 return (rval);
1da177e4 1712 }
a9083016 1713
4d4df193 1714 if (IS_QLA84XX(ha)) {
e315cd28 1715 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 1716 if (!ha->cs84xx) {
7c3df132 1717 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
1718 "Unable to configure ISP84XX.\n");
1719 return QLA_FUNCTION_FAILED;
1720 }
1721 }
2d70c103 1722
ead03855 1723 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2d70c103
NB
1724 rval = qla2x00_init_rings(vha);
1725
2533cf67 1726 ha->flags.chip_reset_done = 1;
1da177e4 1727
9a069e19 1728 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 1729 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
1730 rval = qla84xx_init_chip(vha);
1731 if (rval != QLA_SUCCESS) {
7c3df132
SK
1732 ql_log(ql_log_warn, vha, 0x00d4,
1733 "Unable to initialize ISP84XX.\n");
8d2b21db 1734 qla84xx_put_chip(vha);
9a069e19
GM
1735 }
1736 }
1737
7d613ac6
SV
1738 /* Load the NIC Core f/w if we are the first protocol driver. */
1739 if (IS_QLA8031(ha)) {
1740 rval = qla83xx_nic_core_fw_load(vha);
1741 if (rval)
1742 ql_log(ql_log_warn, vha, 0x0124,
1743 "Error in initializing NIC Core f/w.\n");
1744 }
1745
2f0f3f4f
MI
1746 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
1747 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 1748
c46e65c7
JC
1749 if (IS_P3P_TYPE(ha))
1750 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
1751 else
1752 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
1753
1da177e4
LT
1754 return (rval);
1755}
1756
1757/**
abbd8870 1758 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1da177e4
LT
1759 * @ha: HA context
1760 *
1761 * Returns 0 on success.
1762 */
abbd8870 1763int
e315cd28 1764qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 1765{
a157b101 1766 uint16_t w;
abbd8870 1767 unsigned long flags;
e315cd28 1768 struct qla_hw_data *ha = vha->hw;
3d71644c 1769 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1770
1da177e4 1771 pci_set_master(ha->pdev);
af6177d8 1772 pci_try_set_mwi(ha->pdev);
1da177e4 1773
1da177e4 1774 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1775 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
1776 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1777
737faece 1778 pci_disable_rom(ha->pdev);
1da177e4
LT
1779
1780 /* Get PCI bus information. */
1781 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 1782 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
1783 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1784
abbd8870
AV
1785 return QLA_SUCCESS;
1786}
1da177e4 1787
abbd8870
AV
1788/**
1789 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
1790 * @ha: HA context
1791 *
1792 * Returns 0 on success.
1793 */
1794int
e315cd28 1795qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 1796{
a157b101 1797 uint16_t w;
abbd8870
AV
1798 unsigned long flags = 0;
1799 uint32_t cnt;
e315cd28 1800 struct qla_hw_data *ha = vha->hw;
3d71644c 1801 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1802
abbd8870 1803 pci_set_master(ha->pdev);
af6177d8 1804 pci_try_set_mwi(ha->pdev);
1da177e4 1805
abbd8870 1806 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1807 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 1808
abbd8870
AV
1809 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1810 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 1811 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 1812
abbd8870
AV
1813 /*
1814 * If this is a 2300 card and not 2312, reset the
1815 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
1816 * the 2310 also reports itself as a 2300 so we need to get the
1817 * fb revision level -- a 6 indicates it really is a 2300 and
1818 * not a 2310.
1819 */
1820 if (IS_QLA2300(ha)) {
1821 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 1822
abbd8870 1823 /* Pause RISC. */
3d71644c 1824 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 1825 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 1826 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 1827 break;
1da177e4 1828
abbd8870
AV
1829 udelay(10);
1830 }
1da177e4 1831
abbd8870 1832 /* Select FPM registers. */
3d71644c
AV
1833 WRT_REG_WORD(&reg->ctrl_status, 0x20);
1834 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1835
1836 /* Get the fb rev level */
3d71644c 1837 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
1838
1839 if (ha->fb_rev == FPM_2300)
a157b101 1840 pci_clear_mwi(ha->pdev);
abbd8870
AV
1841
1842 /* Deselect FPM registers. */
3d71644c
AV
1843 WRT_REG_WORD(&reg->ctrl_status, 0x0);
1844 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1845
1846 /* Release RISC module. */
3d71644c 1847 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 1848 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 1849 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
1850 break;
1851
1852 udelay(10);
1da177e4 1853 }
1da177e4 1854
abbd8870
AV
1855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1856 }
1da177e4 1857
abbd8870
AV
1858 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1859
737faece 1860 pci_disable_rom(ha->pdev);
1da177e4 1861
abbd8870
AV
1862 /* Get PCI bus information. */
1863 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 1864 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1865 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1866
1867 return QLA_SUCCESS;
1da177e4
LT
1868}
1869
0107109e
AV
1870/**
1871 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
1872 * @vha: HA context
1873 *
1874 * Returns 0 on success.
1875 */
1876int
e315cd28 1877qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 1878{
a157b101 1879 uint16_t w;
0107109e 1880 unsigned long flags = 0;
e315cd28 1881 struct qla_hw_data *ha = vha->hw;
0107109e 1882 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
1883
1884 pci_set_master(ha->pdev);
af6177d8 1885 pci_try_set_mwi(ha->pdev);
0107109e
AV
1886
1887 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1888 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
1889 w &= ~PCI_COMMAND_INTX_DISABLE;
1890 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1891
1892 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1893
1894 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
1895 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
1896 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
1897
1898 /* PCIe -- adjust Maximum Read Request Size (4096). */
e67f1321 1899 if (pci_is_pcie(ha->pdev))
5ffd3a52 1900 pcie_set_readrq(ha->pdev, 4096);
0107109e 1901
737faece 1902 pci_disable_rom(ha->pdev);
0107109e 1903
44c10138 1904 ha->chip_revision = ha->pdev->revision;
a8488abe 1905
0107109e
AV
1906 /* Get PCI bus information. */
1907 spin_lock_irqsave(&ha->hardware_lock, flags);
1908 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
1909 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1910
1911 return QLA_SUCCESS;
1912}
1913
c3a2f0df
AV
1914/**
1915 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
1917 * @vha: HA context
1917 *
1918 * Returns 0 on success.
1919 */
1920int
e315cd28 1921qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
1922{
1923 uint16_t w;
e315cd28 1924 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
1925
1926 pci_set_master(ha->pdev);
1927 pci_try_set_mwi(ha->pdev);
1928
1929 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1930 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1931 w &= ~PCI_COMMAND_INTX_DISABLE;
1932 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1933
1934 /* PCIe -- adjust Maximum Read Request Size (4096). */
e67f1321 1935 if (pci_is_pcie(ha->pdev))
5ffd3a52 1936 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 1937
737faece 1938 pci_disable_rom(ha->pdev);
c3a2f0df
AV
1939
1940 ha->chip_revision = ha->pdev->revision;
1941
1942 return QLA_SUCCESS;
1943}
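
/*
 * The FWI2-capable variants above differ from the 2x00 ones mainly in
 * that INTx is explicitly left enabled (~PCI_COMMAND_INTX_DISABLE), the
 * PCIe Maximum Read Request Size is raised to 4096 bytes, and the PCI
 * revision ID is cached in ha->chip_revision for later use; the ISP25xx
 * path has no PCI-X leg and no latency-timer write.
 */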
1944
1da177e4
LT
1945/**
1946 * qla2x00_isp_firmware() - Choose firmware image.
1947 * @vha: HA context
1948 *
1949 * Returns 0 on success.
1950 */
1951static int
e315cd28 1952qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
1953{
1954 int rval;
42e421b1
AV
1955 uint16_t loop_id, topo, sw_cap;
1956 uint8_t domain, area, al_pa;
e315cd28 1957 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1958
1959 /* Assume loading risc code */
fa2a1ce5 1960 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
1961
1962 if (ha->flags.disable_risc_code_load) {
7c3df132 1963 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
1964
1965 /* Verify checksum of loaded RISC code. */
e315cd28 1966 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
1967 if (rval == QLA_SUCCESS) {
1968 /* And, verify we are not in ROM code. */
e315cd28 1969 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
1970 &area, &domain, &topo, &sw_cap);
1971 }
1da177e4
LT
1972 }
1973
7c3df132
SK
1974 if (rval)
1975 ql_dbg(ql_dbg_init, vha, 0x007a,
1976 "**** Load RISC code ****.\n");
1da177e4
LT
1977
1978 return (rval);
1979}
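
/*
 * Return convention above is easy to misread: rval starts out as
 * QLA_FUNCTION_FAILED ("load RISC code") and only becomes QLA_SUCCESS
 * when disable_risc_code_load is set *and* the firmware already resident
 * on the adapter passes both the checksum check and the Get Adapter ID
 * sanity mailbox.  Callers are expected to treat a non-zero return as
 * "download firmware from the host".
 */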
1980
1981/**
1982 * qla2x00_reset_chip() - Reset ISP chip.
1983 * @vha: HA context
1984 *
1985 * Returns 0 on success.
1986 */
abbd8870 1987void
e315cd28 1988qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
1989{
1990 unsigned long flags = 0;
e315cd28 1991 struct qla_hw_data *ha = vha->hw;
3d71644c 1992 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1993 uint32_t cnt;
1da177e4
LT
1994 uint16_t cmd;
1995
85880801
AV
1996 if (unlikely(pci_channel_offline(ha->pdev)))
1997 return;
1998
fd34f556 1999 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
2000
2001 spin_lock_irqsave(&ha->hardware_lock, flags);
2002
2003 /* Turn off master enable */
2004 cmd = 0;
2005 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2006 cmd &= ~PCI_COMMAND_MASTER;
2007 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2008
2009 if (!IS_QLA2100(ha)) {
2010 /* Pause RISC. */
2011 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
2012 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2013 for (cnt = 0; cnt < 30000; cnt++) {
2014 if ((RD_REG_WORD(&reg->hccr) &
2015 HCCR_RISC_PAUSE) != 0)
2016 break;
2017 udelay(100);
2018 }
2019 } else {
2020 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2021 udelay(10);
2022 }
2023
2024 /* Select FPM registers. */
2025 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2026 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2027
2028 /* FPM Soft Reset. */
2029 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
2030 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2031
2032 /* Toggle Fpm Reset. */
2033 if (!IS_QLA2200(ha)) {
2034 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
2035 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2036 }
2037
2038 /* Select frame buffer registers. */
2039 WRT_REG_WORD(&reg->ctrl_status, 0x10);
2040 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2041
2042 /* Reset frame buffer FIFOs. */
2043 if (IS_QLA2200(ha)) {
2044 WRT_FB_CMD_REG(ha, reg, 0xa000);
2045 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2046 } else {
2047 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2048
2049 /* Read back fb_cmd until zero or 3 seconds max */
2050 for (cnt = 0; cnt < 3000; cnt++) {
2051 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2052 break;
2053 udelay(100);
2054 }
2055 }
2056
2057 /* Select RISC module registers. */
2058 WRT_REG_WORD(&reg->ctrl_status, 0);
2059 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2060
2061 /* Reset RISC processor. */
2062 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2063 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2064
2065 /* Release RISC processor. */
2066 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2067 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2068 }
2069
2070 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
2071 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
2072
2073 /* Reset ISP chip. */
2074 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2075
2076 /* Wait for RISC to recover from reset. */
2077 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2078 /*
2079 * It is necessary to have a delay here since the card doesn't
2080 * respond to PCI reads during a reset. On some architectures
2081 * this will result in an MCA.
2082 */
2083 udelay(20);
2084 for (cnt = 30000; cnt; cnt--) {
2085 if ((RD_REG_WORD(&reg->ctrl_status) &
2086 CSR_ISP_SOFT_RESET) == 0)
2087 break;
2088 udelay(100);
2089 }
2090 } else
2091 udelay(10);
2092
2093 /* Reset RISC processor. */
2094 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2095
2096 WRT_REG_WORD(&reg->semaphore, 0);
2097
2098 /* Release RISC processor. */
2099 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2100 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2101
2102 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2103 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 2104 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 2105 break;
1da177e4
LT
2106
2107 udelay(100);
2108 }
2109 } else
2110 udelay(100);
2111
2112 /* Turn on master enable */
2113 cmd |= PCI_COMMAND_MASTER;
2114 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2115
2116 /* Disable RISC pause on FPM parity error. */
2117 if (!IS_QLA2100(ha)) {
2118 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
2119 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2120 }
2121
2122 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2123}
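
/*
 * Ordering in qla2x00_reset_chip() matters: interrupts and bus mastering
 * are turned off first, the RISC is paused, the FPM and frame-buffer
 * blocks get their own soft resets, and only then is the full ISP soft
 * reset asserted.  The extra register reads that follow most writes are
 * there purely to flush PCI posting, as the inline comments note.
 */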
2124
b1d46989
MI
2125/**
2126 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
2127 *
2128 * Returns 0 on success.
2129 */
fa492630 2130static int
b1d46989
MI
2131qla81xx_reset_mpi(scsi_qla_host_t *vha)
2132{
2133 uint16_t mb[4] = {0x1010, 0, 1, 0};
2134
6246b8a1
GM
2135 if (!IS_QLA81XX(vha->hw))
2136 return QLA_SUCCESS;
2137
b1d46989
MI
2138 return qla81xx_write_mpi_register(vha, mb);
2139}
2140
0107109e 2141/**
88c26663 2142 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
0107109e
AV
2143 * @vha: HA context
2144 *
2145 * Returns 0 on success.
2146 */
d14e72fb 2147static inline int
e315cd28 2148qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
2149{
2150 unsigned long flags = 0;
e315cd28 2151 struct qla_hw_data *ha = vha->hw;
0107109e 2152 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
52c82823 2153 uint32_t cnt;
335a1cc9 2154 uint16_t wd;
b1d46989 2155 static int abts_cnt; /* ISP abort retry counts */
d14e72fb 2156 int rval = QLA_SUCCESS;
0107109e 2157
0107109e
AV
2158 spin_lock_irqsave(&ha->hardware_lock, flags);
2159
2160 /* Reset RISC. */
2161 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2162 for (cnt = 0; cnt < 30000; cnt++) {
2163 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2164 break;
2165
2166 udelay(10);
2167 }
2168
d14e72fb
HM
2169 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
2170 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2171
2172 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2173 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2174 RD_REG_DWORD(&reg->hccr),
2175 RD_REG_DWORD(&reg->ctrl_status),
2176 (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
2177
0107109e
AV
2178 WRT_REG_DWORD(&reg->ctrl_status,
2179 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 2180 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 2181
335a1cc9 2182 udelay(100);
d14e72fb 2183
88c26663 2184 /* Wait for firmware to complete NVRAM accesses. */
52c82823 2185 RD_REG_WORD(&reg->mailbox0);
d14e72fb
HM
2186 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2187 rval == QLA_SUCCESS; cnt--) {
88c26663 2188 barrier();
d14e72fb
HM
2189 if (cnt)
2190 udelay(5);
2191 else
2192 rval = QLA_FUNCTION_TIMEOUT;
88c26663
AV
2193 }
2194
d14e72fb
HM
2195 if (rval == QLA_SUCCESS)
2196 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2197
2198 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2199 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2200 RD_REG_DWORD(&reg->hccr),
2201 RD_REG_DWORD(&reg->mailbox0));
2202
335a1cc9 2203 /* Wait for soft-reset to complete. */
52c82823 2204 RD_REG_DWORD(&reg->ctrl_status);
200ffb15 2205 for (cnt = 0; cnt < 60; cnt++) {
0107109e 2206 barrier();
d14e72fb
HM
2207 if ((RD_REG_DWORD(&reg->ctrl_status) &
2208 CSRX_ISP_SOFT_RESET) == 0)
2209 break;
2210
2211 udelay(5);
0107109e 2212 }
d14e72fb
HM
2213 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
2214 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2215
2216 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2217 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2218 RD_REG_DWORD(&reg->hccr),
2219 RD_REG_DWORD(&reg->ctrl_status));
0107109e 2220
b1d46989
MI
2221 /* If required, do an MPI FW reset now */
2222 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2223 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2224 if (++abts_cnt < 5) {
2225 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2226 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2227 } else {
2228 /*
2229 * We exhausted the ISP abort retries. We have to
2230 * set the board offline.
2231 */
2232 abts_cnt = 0;
2233 vha->flags.online = 0;
2234 }
2235 }
2236 }
2237
0107109e
AV
2238 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2239 RD_REG_DWORD(&reg->hccr);
2240
2241 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2242 RD_REG_DWORD(&reg->hccr);
2243
2244 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2245 RD_REG_DWORD(&reg->hccr);
2246
52c82823 2247 RD_REG_WORD(&reg->mailbox0);
200ffb15 2248 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
d14e72fb 2249 rval == QLA_SUCCESS; cnt--) {
0107109e 2250 barrier();
d14e72fb
HM
2251 if (cnt)
2252 udelay(5);
2253 else
2254 rval = QLA_FUNCTION_TIMEOUT;
0107109e 2255 }
d14e72fb
HM
2256 if (rval == QLA_SUCCESS)
2257 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2258
2259 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2260 "Host Risc 0x%x, mailbox0 0x%x\n",
2261 RD_REG_DWORD(&reg->hccr),
2262 RD_REG_WORD(&reg->mailbox0));
0107109e
AV
2263
2264 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6 2265
d14e72fb
HM
2266 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2267 "Driver in %s mode\n",
2268 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2269
124f85e6
AV
2270 if (IS_NOPOLLING_TYPE(ha))
2271 ha->isp_ops->enable_intrs(ha);
d14e72fb
HM
2272
2273 return rval;
0107109e
AV
2274}
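
/*
 * The fw_dump_cap_flags bits set above (DMA_SHUTDOWN_CMPL, ISP_MBX_RDY,
 * ISP_SOFT_RESET_CMPL, RISC_RDY_AFT_RESET) are breadcrumbs recording how
 * far the reset sequence progressed; they appear to exist so that a later
 * firmware dump can report which milestones were reached.  Note also that
 * abts_cnt is static, so MPI-reset retries are counted across successive
 * ISP aborts, and the board is only taken offline after five failures.
 */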
2275
4ea2c9c7
JC
2276static void
2277qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2278{
2279 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2280
2281 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2282 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2283
2284}
2285
2286static void
2287qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2288{
2289 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2290
2291 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2292 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2293}
2294
2295static void
2296qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2297{
4ea2c9c7
JC
2298 uint32_t wd32 = 0;
2299 uint delta_msec = 100;
2300 uint elapsed_msec = 0;
2301 uint timeout_msec;
2302 ulong n;
2303
cc790764
JC
2304 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2305 vha->hw->pdev->subsystem_device != 0x0240)
4ea2c9c7
JC
2306 return;
2307
8dd7e3a5
JC
2308 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2309 udelay(100);
2310
4ea2c9c7
JC
2311attempt:
2312 timeout_msec = TIMEOUT_SEMAPHORE;
2313 n = timeout_msec / delta_msec;
2314 while (n--) {
2315 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2316 qla25xx_read_risc_sema_reg(vha, &wd32);
2317 if (wd32 & RISC_SEMAPHORE)
2318 break;
2319 msleep(delta_msec);
2320 elapsed_msec += delta_msec;
2321 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2322 goto force;
2323 }
2324
2325 if (!(wd32 & RISC_SEMAPHORE))
2326 goto force;
2327
2328 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2329 goto acquired;
2330
2331 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2332 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2333 n = timeout_msec / delta_msec;
2334 while (n--) {
2335 qla25xx_read_risc_sema_reg(vha, &wd32);
2336 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2337 break;
2338 msleep(delta_msec);
2339 elapsed_msec += delta_msec;
2340 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2341 goto force;
2342 }
2343
2344 if (wd32 & RISC_SEMAPHORE_FORCE)
2345 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2346
2347 goto attempt;
2348
2349force:
2350 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2351
2352acquired:
2353 return;
2354}
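
/*
 * Semaphore handling above is limited to two specific subsystem IDs
 * (0x0175 and 0x0240).  With the RISC paused, the code tries to take
 * RISC_SEMAPHORE; if a stale RISC_SEMAPHORE_FORCE bit is found it is
 * cleared and the acquisition retried, and only after the total-elapsed
 * timeout expires does it fall back to setting the FORCE bit outright.
 */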
2355
88c26663
AV
2356/**
2357 * qla24xx_reset_chip() - Reset ISP24xx chip.
2358 * @vha: HA context
2359 *
2360 * Returns 0 on success.
2361 */
2362void
e315cd28 2363qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 2364{
e315cd28 2365 struct qla_hw_data *ha = vha->hw;
85880801
AV
2366
2367 if (pci_channel_offline(ha->pdev) &&
2368 ha->flags.pci_channel_io_perm_failure) {
2369 return;
2370 }
2371
fd34f556 2372 ha->isp_ops->disable_intrs(ha);
88c26663 2373
4ea2c9c7
JC
2374 qla25xx_manipulate_risc_semaphore(vha);
2375
88c26663 2376 /* Perform RISC reset. */
e315cd28 2377 qla24xx_reset_risc(vha);
88c26663
AV
2378}
2379
1da177e4
LT
2380/**
2381 * qla2x00_chip_diag() - Test chip for proper operation.
2382 * @vha: HA context
2383 *
2384 * Returns 0 on success.
2385 */
abbd8870 2386int
e315cd28 2387qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
2388{
2389 int rval;
e315cd28 2390 struct qla_hw_data *ha = vha->hw;
3d71644c 2391 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
2392 unsigned long flags = 0;
2393 uint16_t data;
2394 uint32_t cnt;
2395 uint16_t mb[5];
73208dfd 2396 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
2397
2398 /* Assume a failed state */
2399 rval = QLA_FUNCTION_FAILED;
2400
7c3df132
SK
2401 ql_dbg(ql_dbg_init, vha, 0x007b,
2402 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1da177e4
LT
2403
2404 spin_lock_irqsave(&ha->hardware_lock, flags);
2405
2406 /* Reset ISP chip. */
2407 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2408
2409 /*
2410 * We need to have a delay here since the card will not respond while
2411 * in reset, which would cause an MCA on some architectures.
2412 */
2413 udelay(20);
2414 data = qla2x00_debounce_register(&reg->ctrl_status);
2415 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2416 udelay(5);
2417 data = RD_REG_WORD(&reg->ctrl_status);
2418 barrier();
2419 }
2420
2421 if (!cnt)
2422 goto chip_diag_failed;
2423
7c3df132
SK
2424 ql_dbg(ql_dbg_init, vha, 0x007c,
2425 "Reset register cleared by chip reset.\n");
1da177e4
LT
2426
2427 /* Reset RISC processor. */
2428 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2429 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2430
2431 /* Workaround for QLA2312 PCI parity error */
2432 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2433 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2434 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2435 udelay(5);
2436 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 2437 barrier();
1da177e4
LT
2438 }
2439 } else
2440 udelay(10);
2441
2442 if (!cnt)
2443 goto chip_diag_failed;
2444
2445 /* Check product ID of chip */
5a68a1c2 2446 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
1da177e4
LT
2447
2448 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
2449 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
2450 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
2451 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
2452 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
2453 mb[3] != PROD_ID_3) {
7c3df132
SK
2454 ql_log(ql_log_warn, vha, 0x0062,
2455 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
2456 mb[1], mb[2], mb[3]);
1da177e4
LT
2457
2458 goto chip_diag_failed;
2459 }
2460 ha->product_id[0] = mb[1];
2461 ha->product_id[1] = mb[2];
2462 ha->product_id[2] = mb[3];
2463 ha->product_id[3] = mb[4];
2464
2465 /* Adjust fw RISC transfer size */
73208dfd 2466 if (req->length > 1024)
1da177e4
LT
2467 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
2468 else
2469 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 2470 req->length;
1da177e4
LT
2471
2472 if (IS_QLA2200(ha) &&
2473 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
2474 /* Limit firmware transfer size with a 2200A */
7c3df132 2475 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 2476
ea5b6382 2477 ha->device_type |= DT_ISP2200A;
1da177e4
LT
2478 ha->fw_transfer_size = 128;
2479 }
2480
2481 /* Wrap Incoming Mailboxes Test. */
2482 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2483
7c3df132 2484 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 2485 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
2486 if (rval)
2487 ql_log(ql_log_warn, vha, 0x0080,
2488 "Failed mailbox send register test.\n");
2489 else
1da177e4
LT
2490 /* Flag a successful rval */
2491 rval = QLA_SUCCESS;
1da177e4
LT
2492 spin_lock_irqsave(&ha->hardware_lock, flags);
2493
2494chip_diag_failed:
2495 if (rval)
7c3df132
SK
2496 ql_log(ql_log_info, vha, 0x0081,
2497 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
2498
2499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2500
2501 return (rval);
2502}
2503
0107109e
AV
2504/**
2505 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2506 * @vha: HA context
2507 *
2508 * Returns 0 on success.
2509 */
2510int
e315cd28 2511qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
2512{
2513 int rval;
e315cd28 2514 struct qla_hw_data *ha = vha->hw;
73208dfd 2515 struct req_que *req = ha->req_q_map[0];
0107109e 2516
7ec0effd 2517 if (IS_P3P_TYPE(ha))
a9083016
GM
2518 return QLA_SUCCESS;
2519
73208dfd 2520 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 2521
e315cd28 2522 rval = qla2x00_mbx_reg_test(vha);
0107109e 2523 if (rval) {
7c3df132
SK
2524 ql_log(ql_log_warn, vha, 0x0082,
2525 "Failed mailbox send register test.\n");
0107109e
AV
2526 } else {
2527 /* Flag a successful rval */
2528 rval = QLA_SUCCESS;
2529 }
2530
2531 return rval;
2532}
2533
a7a167bf 2534void
e315cd28 2535qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
0107109e 2536{
a7a167bf
AV
2537 int rval;
2538 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
73208dfd 2539 eft_size, fce_size, mq_size;
df613b96
AV
2540 dma_addr_t tc_dma;
2541 void *tc;
e315cd28 2542 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
2543 struct req_que *req = ha->req_q_map[0];
2544 struct rsp_que *rsp = ha->rsp_q_map[0];
a7a167bf
AV
2545
2546 if (ha->fw_dump) {
7c3df132
SK
2547 ql_dbg(ql_dbg_init, vha, 0x00bd,
2548 "Firmware dump already allocated.\n");
a7a167bf
AV
2549 return;
2550 }
d4e3e04d 2551
0107109e 2552 ha->fw_dumped = 0;
61f098dd 2553 ha->fw_dump_cap_flags = 0;
f73cb695
CD
2554 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
2555 req_q_size = rsp_q_size = 0;
2556
2557 if (IS_QLA27XX(ha))
2558 goto try_fce;
2559
d4e3e04d 2560 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
a7a167bf 2561 fixed_size = sizeof(struct qla2100_fw_dump);
d4e3e04d 2562 } else if (IS_QLA23XX(ha)) {
a7a167bf
AV
2563 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
2564 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
2565 sizeof(uint16_t);
e428924c 2566 } else if (IS_FWI2_CAPABLE(ha)) {
b20f02e1 2567 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
6246b8a1
GM
2568 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
2569 else if (IS_QLA81XX(ha))
3a03eb79
AV
2570 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
2571 else if (IS_QLA25XX(ha))
2572 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
2573 else
2574 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
f73cb695 2575
a7a167bf
AV
2576 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
2577 sizeof(uint32_t);
050c9bb1 2578 if (ha->mqenable) {
b20f02e1 2579 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
6246b8a1 2580 mq_size = sizeof(struct qla2xxx_mq_chain);
050c9bb1
GM
2581 /*
2582 * Allocate maximum buffer size for all queues.
2583 * Resizing must be done at end-of-dump processing.
2584 */
2585 mq_size += ha->max_req_queues *
2586 (req->length * sizeof(request_t));
2587 mq_size += ha->max_rsp_queues *
2588 (rsp->length * sizeof(response_t));
2589 }
00876ae8 2590 if (ha->tgt.atio_ring)
2d70c103 2591 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
df613b96 2592 /* Allocate memory for Fibre Channel Event Buffer. */
f73cb695
CD
2593 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2594 !IS_QLA27XX(ha))
436a7b11 2595 goto try_eft;
df613b96 2596
f73cb695
CD
2597try_fce:
2598 if (ha->fce)
2599 dma_free_coherent(&ha->pdev->dev,
2600 FCE_SIZE, ha->fce, ha->fce_dma);
2601
2602 /* Allocate memory for Fibre Channel Event Buffer. */
0ea85b50
JP
2603 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
2604 GFP_KERNEL);
df613b96 2605 if (!tc) {
7c3df132
SK
2606 ql_log(ql_log_warn, vha, 0x00be,
2607 "Unable to allocate (%d KB) for FCE.\n",
2608 FCE_SIZE / 1024);
17d98630 2609 goto try_eft;
df613b96
AV
2610 }
2611
e315cd28 2612 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
2613 ha->fce_mb, &ha->fce_bufs);
2614 if (rval) {
7c3df132
SK
2615 ql_log(ql_log_warn, vha, 0x00bf,
2616 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
2617 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
2618 tc_dma);
2619 ha->flags.fce_enabled = 0;
17d98630 2620 goto try_eft;
df613b96 2621 }
cfb0919c 2622 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 2623 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 2624
7d9dade3 2625 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
df613b96
AV
2626 ha->flags.fce_enabled = 1;
2627 ha->fce_dma = tc_dma;
2628 ha->fce = tc;
f73cb695 2629
436a7b11 2630try_eft:
f73cb695
CD
2631 if (ha->eft)
2632 dma_free_coherent(&ha->pdev->dev,
2633 EFT_SIZE, ha->eft, ha->eft_dma);
2634
436a7b11 2635 /* Allocate memory for Extended Trace Buffer. */
0ea85b50
JP
2636 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
2637 GFP_KERNEL);
436a7b11 2638 if (!tc) {
7c3df132
SK
2639 ql_log(ql_log_warn, vha, 0x00c1,
2640 "Unable to allocate (%d KB) for EFT.\n",
2641 EFT_SIZE / 1024);
436a7b11
AV
2642 goto cont_alloc;
2643 }
2644
e315cd28 2645 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 2646 if (rval) {
7c3df132
SK
2647 ql_log(ql_log_warn, vha, 0x00c2,
2648 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
2649 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
2650 tc_dma);
2651 goto cont_alloc;
2652 }
cfb0919c 2653 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 2654 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11
AV
2655
2656 eft_size = EFT_SIZE;
2657 ha->eft_dma = tc_dma;
2658 ha->eft = tc;
d4e3e04d 2659 }
f73cb695 2660
a7a167bf 2661cont_alloc:
f73cb695
CD
2662 if (IS_QLA27XX(ha)) {
2663 if (!ha->fw_dump_template) {
2664 ql_log(ql_log_warn, vha, 0x00ba,
2665 "Failed missing fwdump template\n");
2666 return;
2667 }
2668 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
2669 ql_dbg(ql_dbg_init, vha, 0x00fa,
2670 "-> allocating fwdump (%x bytes)...\n", dump_size);
2671 goto allocate;
2672 }
2673
73208dfd
AC
2674 req_q_size = req->length * sizeof(request_t);
2675 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf 2676 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 2677 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
2678 ha->chain_offset = dump_size;
2679 dump_size += mq_size + fce_size;
d4e3e04d 2680
b945e777
QT
2681 if (ha->exchoffld_buf)
2682 dump_size += sizeof(struct qla2xxx_offld_chain) +
2683 ha->exchoffld_size;
2684 if (ha->exlogin_buf)
2685 dump_size += sizeof(struct qla2xxx_offld_chain) +
2686 ha->exlogin_size;
2687
f73cb695 2688allocate:
d4e3e04d 2689 ha->fw_dump = vmalloc(dump_size);
a7a167bf 2690 if (!ha->fw_dump) {
7c3df132
SK
2691 ql_log(ql_log_warn, vha, 0x00c4,
2692 "Unable to allocate (%d KB) for firmware dump.\n",
2693 dump_size / 1024);
a7a167bf 2694
e30d1756
MI
2695 if (ha->fce) {
2696 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2697 ha->fce_dma);
2698 ha->fce = NULL;
2699 ha->fce_dma = 0;
2700 }
2701
a7a167bf
AV
2702 if (ha->eft) {
2703 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
2704 ha->eft_dma);
2705 ha->eft = NULL;
2706 ha->eft_dma = 0;
2707 }
2708 return;
2709 }
f73cb695 2710 ha->fw_dump_len = dump_size;
cfb0919c 2711 ql_dbg(ql_dbg_init, vha, 0x00c5,
7c3df132 2712 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
a7a167bf 2713
f73cb695
CD
2714 if (IS_QLA27XX(ha))
2715 return;
2716
a7a167bf
AV
2717 ha->fw_dump->signature[0] = 'Q';
2718 ha->fw_dump->signature[1] = 'L';
2719 ha->fw_dump->signature[2] = 'G';
2720 ha->fw_dump->signature[3] = 'C';
ad950360 2721 ha->fw_dump->version = htonl(1);
a7a167bf
AV
2722
2723 ha->fw_dump->fixed_size = htonl(fixed_size);
2724 ha->fw_dump->mem_size = htonl(mem_size);
2725 ha->fw_dump->req_q_size = htonl(req_q_size);
2726 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
2727
2728 ha->fw_dump->eft_size = htonl(eft_size);
2729 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
2730 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
2731
2732 ha->fw_dump->header_size =
2733 htonl(offsetof(struct qla2xxx_fw_dump, isp));
0107109e
AV
2734}
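
/*
 * Rough sizing performed above, for non-ISP27xx parts:
 *
 *	dump_size = header
 *		  + fixed_size		(per-ISP register dump area)
 *		  + mem_size		(external firmware memory)
 *		  + req_q_size + rsp_q_size
 *		  + eft_size + mq_size + fce_size
 *		  [+ exchange-offload and extended-login chains]
 *
 * ISP27xx instead derives the size from its firmware dump template via
 * qla27xx_fwdt_calculate_dump_size() and skips the header fixups at the
 * end of the function.
 */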
2735
18e7555a
AV
2736static int
2737qla81xx_mpi_sync(scsi_qla_host_t *vha)
2738{
2739#define MPS_MASK 0xe0
2740 int rval;
2741 uint16_t dc;
2742 uint32_t dw;
18e7555a
AV
2743
2744 if (!IS_QLA81XX(vha->hw))
2745 return QLA_SUCCESS;
2746
2747 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
2748 if (rval != QLA_SUCCESS) {
7c3df132
SK
2749 ql_log(ql_log_warn, vha, 0x0105,
2750 "Unable to acquire semaphore.\n");
18e7555a
AV
2751 goto done;
2752 }
2753
2754 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
2755 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
2756 if (rval != QLA_SUCCESS) {
7c3df132 2757 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
2758 goto done_release;
2759 }
2760
2761 dc &= MPS_MASK;
2762 if (dc == (dw & MPS_MASK))
2763 goto done_release;
2764
2765 dw &= ~MPS_MASK;
2766 dw |= dc;
2767 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
2768 if (rval != QLA_SUCCESS) {
7c3df132 2769 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
2770 }
2771
2772done_release:
2773 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
2774 if (rval != QLA_SUCCESS) {
7c3df132
SK
2775 ql_log(ql_log_warn, vha, 0x006d,
2776 "Unable to release semaphore.\n");
18e7555a
AV
2777 }
2778
2779done:
2780 return rval;
2781}
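
/*
 * qla81xx_mpi_sync() keeps the MPS field coherent: it grabs a firmware
 * semaphore (RAM word 0x7c00), reads the MPS bits (mask 0xe0) from PCI
 * config offset 0x54 and from RAM word 0x7a15, and rewrites the RAM word
 * only when the two disagree, releasing the semaphore afterwards.  A
 * failure to release is logged but not treated as fatal.
 */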
2782
8d93f550
CD
2783int
2784qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
2785{
2786 /* Don't try to reallocate the array */
2787 if (req->outstanding_cmds)
2788 return QLA_SUCCESS;
2789
d7459527 2790 if (!IS_FWI2_CAPABLE(ha))
8d93f550
CD
2791 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
2792 else {
03e8c680
QT
2793 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
2794 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
8d93f550 2795 else
03e8c680 2796 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
8d93f550
CD
2797 }
2798
2799 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2800 req->num_outstanding_cmds, GFP_KERNEL);
2801
2802 if (!req->outstanding_cmds) {
2803 /*
2804 * Try to allocate a minimal size just so we can get through
2805 * initialization.
2806 */
2807 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
2808 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2809 req->num_outstanding_cmds, GFP_KERNEL);
2810
2811 if (!req->outstanding_cmds) {
2812 ql_log(ql_log_fatal, NULL, 0x0126,
2813 "Failed to allocate memory for "
2814 "outstanding_cmds for req_que %p.\n", req);
2815 req->num_outstanding_cmds = 0;
2816 return QLA_FUNCTION_FAILED;
2817 }
2818 }
2819
2820 return QLA_SUCCESS;
2821}
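
/*
 * The outstanding_cmds[] array sized here is indexed later by the IOCB
 * handle assigned when a command is queued, along the lines of
 * (illustrative only, not code from this file):
 *
 *	handle = req->current_outstanding_cmd;
 *	req->outstanding_cmds[handle] = sp;
 *
 * FWI2-capable adapters size it from the exchange/IOCB counts reported
 * by the firmware; older ISPs use DEFAULT_OUTSTANDING_COMMANDS, and a
 * MIN_OUTSTANDING_COMMANDS fallback keeps initialization alive when the
 * full allocation fails.
 */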
2822
1da177e4
LT
2823/**
2824 * qla2x00_setup_chip() - Load and start RISC firmware.
2825 * @vha: HA context
2826 *
2827 * Returns 0 on success.
2828 */
2829static int
e315cd28 2830qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 2831{
0107109e
AV
2832 int rval;
2833 uint32_t srisc_address = 0;
e315cd28 2834 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
2835 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2836 unsigned long flags;
dda772e8 2837 uint16_t fw_major_version;
3db0652e 2838
7ec0effd 2839 if (IS_P3P_TYPE(ha)) {
a9083016 2840 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
2841 if (rval == QLA_SUCCESS) {
2842 qla2x00_stop_firmware(vha);
a9083016 2843 goto enable_82xx_npiv;
14e303d9 2844 } else
b963752f 2845 goto failed;
a9083016
GM
2846 }
2847
3db0652e
AV
2848 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
2849 /* Disable SRAM, Instruction RAM and GP RAM parity. */
2850 spin_lock_irqsave(&ha->hardware_lock, flags);
2851 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
2852 RD_REG_WORD(&reg->hccr);
2853 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2854 }
1da177e4 2855
18e7555a
AV
2856 qla81xx_mpi_sync(vha);
2857
1da177e4 2858 /* Load firmware sequences */
e315cd28 2859 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 2860 if (rval == QLA_SUCCESS) {
7c3df132
SK
2861 ql_dbg(ql_dbg_init, vha, 0x00c9,
2862 "Verifying Checksum of loaded RISC code.\n");
1da177e4 2863
e315cd28 2864 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
2865 if (rval == QLA_SUCCESS) {
2866 /* Start firmware execution. */
7c3df132
SK
2867 ql_dbg(ql_dbg_init, vha, 0x00ca,
2868 "Starting firmware.\n");
1da177e4 2869
b0d6cabd
HM
2870 if (ql2xexlogins)
2871 ha->flags.exlogins_enabled = 1;
2872
99e1b683 2873 if (qla_is_exch_offld_enabled(vha))
2f56a7f1
HM
2874 ha->flags.exchoffld_enabled = 1;
2875
e315cd28 2876 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 2877 /* Retrieve firmware information. */
dda772e8 2878 if (rval == QLA_SUCCESS) {
b0d6cabd
HM
2879 rval = qla2x00_set_exlogins_buffer(vha);
2880 if (rval != QLA_SUCCESS)
2881 goto failed;
2882
2f56a7f1
HM
2883 rval = qla2x00_set_exchoffld_buffer(vha);
2884 if (rval != QLA_SUCCESS)
2885 goto failed;
2886
a9083016 2887enable_82xx_npiv:
dda772e8 2888 fw_major_version = ha->fw_major_version;
7ec0effd 2889 if (IS_P3P_TYPE(ha))
3173167f 2890 qla82xx_check_md_needed(vha);
6246b8a1
GM
2891 else
2892 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
2893 if (rval != QLA_SUCCESS)
2894 goto failed;
2c3dfe3f 2895 ha->flags.npiv_supported = 0;
e315cd28 2896 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 2897 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 2898 ha->flags.npiv_supported = 1;
4d0ea247
SJ
2899 if ((!ha->max_npiv_vports) ||
2900 ((ha->max_npiv_vports + 1) %
eb66dc60 2901 MIN_MULTI_ID_FABRIC))
4d0ea247 2902 ha->max_npiv_vports =
eb66dc60 2903 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 2904 }
03e8c680 2905 qla2x00_get_resource_cnts(vha);
d743de66 2906
8d93f550
CD
2907 /*
2908 * Allocate the array of outstanding commands
2909 * now that we know the firmware resources.
2910 */
2911 rval = qla2x00_alloc_outstanding_cmds(ha,
2912 vha->req);
2913 if (rval != QLA_SUCCESS)
2914 goto failed;
2915
be5ea3cf 2916 if (!fw_major_version && ql2xallocfwdump
7ec0effd 2917 && !(IS_P3P_TYPE(ha)))
08de2844 2918 qla2x00_alloc_fw_dump(vha);
3b6e5b9d
CD
2919 } else {
2920 goto failed;
1da177e4
LT
2921 }
2922 } else {
7c3df132
SK
2923 ql_log(ql_log_fatal, vha, 0x00cd,
2924 "ISP Firmware failed checksum.\n");
2925 goto failed;
1da177e4 2926 }
c74d88a4
AV
2927 } else
2928 goto failed;
1da177e4 2929
3db0652e
AV
2930 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
2931 /* Enable proper parity. */
2932 spin_lock_irqsave(&ha->hardware_lock, flags);
2933 if (IS_QLA2300(ha))
2934 /* SRAM parity */
2935 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
2936 else
2937 /* SRAM, Instruction RAM and GP RAM parity */
2938 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
2939 RD_REG_WORD(&reg->hccr);
2940 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2941 }
2942
f3982d89
CD
2943 if (IS_QLA27XX(ha))
2944 ha->flags.fac_supported = 1;
2945 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1d2874de
JC
2946 uint32_t size;
2947
2948 rval = qla81xx_fac_get_sector_size(vha, &size);
2949 if (rval == QLA_SUCCESS) {
2950 ha->flags.fac_supported = 1;
2951 ha->fdt_block_size = size << 2;
2952 } else {
7c3df132 2953 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
2954 "Unsupported FAC firmware (%d.%02d.%02d).\n",
2955 ha->fw_major_version, ha->fw_minor_version,
2956 ha->fw_subminor_version);
1ca60e3b 2957
f73cb695 2958 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6246b8a1
GM
2959 ha->flags.fac_supported = 0;
2960 rval = QLA_SUCCESS;
2961 }
1d2874de
JC
2962 }
2963 }
ca9e9c3e 2964failed:
1da177e4 2965 if (rval) {
7c3df132
SK
2966 ql_log(ql_log_fatal, vha, 0x00cf,
2967 "Setup chip ****FAILED****.\n");
1da177e4
LT
2968 }
2969
2970 return (rval);
2971}
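
/*
 * Summary of qla2x00_setup_chip(): load the RISC firmware image, verify
 * its checksum, start execution, then (on success) set up the optional
 * extended-login and exchange-offload buffers, query the firmware
 * version and resource counts, size the outstanding-command array, and
 * allocate the firmware dump buffer on the first pass.  Parity checking
 * that was disabled around the load is re-enabled at the end, and FAC
 * (flash access control) support is probed where IS_FAC_REQUIRED() says
 * it is needed.
 */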
2972
2973/**
2974 * qla2x00_init_response_q_entries() - Initializes response queue entries.
2975 * @rsp: response queue
2976 *
2977 * Beginning of request ring has initialization control block already built
2978 * by nvram config routine.
2979 *
2980 * Returns 0 on success.
2981 */
73208dfd
AC
2982void
2983qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
2984{
2985 uint16_t cnt;
2986 response_t *pkt;
2987
2afa19a9
AC
2988 rsp->ring_ptr = rsp->ring;
2989 rsp->ring_index = 0;
2990 rsp->status_srb = NULL;
e315cd28
AC
2991 pkt = rsp->ring_ptr;
2992 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
2993 pkt->signature = RESPONSE_PROCESSED;
2994 pkt++;
2995 }
1da177e4
LT
2996}
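
/*
 * Stamping RESPONSE_PROCESSED into every response entry gives the
 * interrupt path a cheap way to tell a freshly DMA'd entry (whose
 * signature the firmware has overwritten) from one that has already been
 * consumed; the response-queue handler is expected to restore the
 * signature after processing each entry.
 */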
2997
2998/**
2999 * qla2x00_update_fw_options() - Read and process firmware options.
3000 * @vha: HA context
3001 *
3002 * Returns 0 on success.
3003 */
abbd8870 3004void
e315cd28 3005qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
3006{
3007 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 3008 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3009
3010 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 3011 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
3012
3013 if (IS_QLA2100(ha) || IS_QLA2200(ha))
3014 return;
3015
3016 /* Serial Link options. */
7c3df132
SK
3017 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
3018 "Serial link options.\n");
3019 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
3020 (uint8_t *)&ha->fw_seriallink_options,
3021 sizeof(ha->fw_seriallink_options));
1da177e4
LT
3022
3023 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
3024 if (ha->fw_seriallink_options[3] & BIT_2) {
3025 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
3026
3027 /* 1G settings */
3028 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
3029 emphasis = (ha->fw_seriallink_options[2] &
3030 (BIT_4 | BIT_3)) >> 3;
3031 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 3032 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3033 rx_sens = (ha->fw_seriallink_options[0] &
3034 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3035 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
3036 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3037 if (rx_sens == 0x0)
3038 rx_sens = 0x3;
3039 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
3040 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3041 ha->fw_options[10] |= BIT_5 |
3042 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3043 (tx_sens & (BIT_1 | BIT_0));
3044
3045 /* 2G settings */
3046 swing = (ha->fw_seriallink_options[2] &
3047 (BIT_7 | BIT_6 | BIT_5)) >> 5;
3048 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
3049 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 3050 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3051 rx_sens = (ha->fw_seriallink_options[1] &
3052 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3053 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
3054 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3055 if (rx_sens == 0x0)
3056 rx_sens = 0x3;
3057 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
3058 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3059 ha->fw_options[11] |= BIT_5 |
3060 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3061 (tx_sens & (BIT_1 | BIT_0));
3062 }
3063
3064 /* FCP2 options. */
3065 /* Return command IOCBs without waiting for an ABTS to complete. */
3066 ha->fw_options[3] |= BIT_13;
3067
3068 /* LED scheme. */
3069 if (ha->flags.enable_led_scheme)
3070 ha->fw_options[2] |= BIT_12;
3071
48c02fde 3072 /* Detect ISP6312. */
3073 if (IS_QLA6312(ha))
3074 ha->fw_options[2] |= BIT_13;
3075
088d09d4
GM
3076 /* Set Retry FLOGI in case of P2P connection */
3077 if (ha->operating_mode == P2P) {
3078 ha->fw_options[2] |= BIT_3;
3079 ql_dbg(ql_dbg_disc, vha, 0x2100,
3080 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3081 __func__, ha->fw_options[2]);
3082 }
3083
1da177e4 3084 /* Update firmware options. */
e315cd28 3085 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
3086}
3087
0107109e 3088void
e315cd28 3089qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
3090{
3091 int rval;
e315cd28 3092 struct qla_hw_data *ha = vha->hw;
0107109e 3093
7ec0effd 3094 if (IS_P3P_TYPE(ha))
a9083016
GM
3095 return;
3096
f198cafa
HM
3097 /* Hold status IOCBs until ABTS response received. */
3098 if (ql2xfwholdabts)
3099 ha->fw_options[3] |= BIT_12;
3100
088d09d4
GM
3101 /* Set Retry FLOGI in case of P2P connection */
3102 if (ha->operating_mode == P2P) {
3103 ha->fw_options[2] |= BIT_3;
3104 ql_dbg(ql_dbg_disc, vha, 0x2101,
3105 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3106 __func__, ha->fw_options[2]);
3107 }
3108
41dc529a 3109 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
3c4810ff
QT
3110 if (ql2xmvasynctoatio &&
3111 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
41dc529a
QT
3112 if (qla_tgt_mode_enabled(vha) ||
3113 qla_dual_mode_enabled(vha))
3114 ha->fw_options[2] |= BIT_11;
3115 else
3116 ha->fw_options[2] &= ~BIT_11;
3117 }
3118
f7e761f5
QT
3119 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3120 /*
3121 * Tell FW to track each exchange to prevent
3122 * driver from using stale exchange.
3123 */
3124 if (qla_tgt_mode_enabled(vha) ||
3125 qla_dual_mode_enabled(vha))
3126 ha->fw_options[2] |= BIT_4;
3127 else
3128 ha->fw_options[2] &= ~BIT_4;
3129 }
3130
83548fe2
QT
3131 ql_dbg(ql_dbg_init, vha, 0x00e8,
3132 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
3133 __func__, ha->fw_options[1], ha->fw_options[2],
3134 ha->fw_options[3], vha->host->active_mode);
3c4810ff
QT
3135
3136 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
3137 qla2x00_set_fw_options(vha, ha->fw_options);
41dc529a 3138
0107109e 3139 /* Update Serial Link options. */
f94097ed 3140 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
3141 return;
3142
e315cd28 3143 rval = qla2x00_set_serdes_params(vha,
f94097ed 3144 le16_to_cpu(ha->fw_seriallink_options24[1]),
3145 le16_to_cpu(ha->fw_seriallink_options24[2]),
3146 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 3147 if (rval != QLA_SUCCESS) {
7c3df132 3148 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
3149 "Unable to update Serial Link options (%x).\n", rval);
3150 }
3151}
3152
abbd8870 3153void
e315cd28 3154qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 3155{
e315cd28 3156 struct qla_hw_data *ha = vha->hw;
3d71644c 3157 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
3158 struct req_que *req = ha->req_q_map[0];
3159 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
3160
3161 /* Setup ring parameters in initialization control block. */
ad950360
BVA
3162 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
3163 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3164 ha->init_cb->request_q_length = cpu_to_le16(req->length);
3165 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
3166 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3167 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3168 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3169 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
abbd8870
AV
3170
3171 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
3172 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
3173 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
3174 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
3175 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
3176}
3177
0107109e 3178void
e315cd28 3179qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 3180{
e315cd28 3181 struct qla_hw_data *ha = vha->hw;
118e2ef9 3182 device_reg_t *reg = ISP_QUE_REG(ha, 0);
73208dfd
AC
3183 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3184 struct qla_msix_entry *msix;
0107109e 3185 struct init_cb_24xx *icb;
73208dfd
AC
3186 uint16_t rid = 0;
3187 struct req_que *req = ha->req_q_map[0];
3188 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 3189
6246b8a1 3190 /* Setup ring parameters in initialization control block. */
0107109e 3191 icb = (struct init_cb_24xx *)ha->init_cb;
ad950360
BVA
3192 icb->request_q_outpointer = cpu_to_le16(0);
3193 icb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3194 icb->request_q_length = cpu_to_le16(req->length);
3195 icb->response_q_length = cpu_to_le16(rsp->length);
3196 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3197 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3198 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3199 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
0107109e 3200
2d70c103 3201 /* Setup ATIO queue dma pointers for target mode */
ad950360 3202 icb->atio_q_inpointer = cpu_to_le16(0);
2d70c103
NB
3203 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3204 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
3205 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
3206
7c6300e3 3207 if (IS_SHADOW_REG_CAPABLE(ha))
ad950360 3208 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
7c6300e3 3209
f73cb695 3210 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ad950360
BVA
3211 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3212 icb->rid = cpu_to_le16(rid);
73208dfd
AC
3213 if (ha->flags.msix_enabled) {
3214 msix = &ha->msix_entries[1];
83548fe2 3215 ql_dbg(ql_dbg_init, vha, 0x0019,
7c3df132
SK
3216 "Registering vector 0x%x for base que.\n",
3217 msix->entry);
73208dfd
AC
3218 icb->msix = cpu_to_le16(msix->entry);
3219 }
3220 /* Use alternate PCI bus number */
3221 if (MSB(rid))
ad950360 3222 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
73208dfd
AC
3223 /* Use alternate PCI devfn */
3224 if (LSB(rid))
ad950360 3225 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
73208dfd 3226
3155754a 3227 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
3228 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
3229 (ha->flags.msix_enabled)) {
ad950360 3230 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3155754a 3231 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
3232 ql_dbg(ql_dbg_init, vha, 0x00fe,
3233 "MSIX Handshake Disable Mode turned on.\n");
3155754a 3234 } else {
ad950360 3235 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3155754a 3236 }
ad950360 3237 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
73208dfd
AC
3238
3239 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
3240 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
3241 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
3242 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
3243 } else {
3244 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
3245 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
3246 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
3247 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
3248 }
aa230bc5 3249 qlt_24xx_config_rings(vha);
2d70c103 3250
73208dfd
AC
3251 /* PCI posting */
3252 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
3253}
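
/*
 * For multiqueue-capable parts (mqenable, ISP83xx, ISP27xx) the init
 * control block above also carries the QoS value, RID and the MSI-X
 * vector of the base queue, and firmware_options_2 selects the MSI-X
 * handshake behaviour (BIT_22/BIT_23); everything else falls through to
 * the legacy isp24 request/response queue-pointer registers.
 */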
3254
1da177e4
LT
3255/**
3256 * qla2x00_init_rings() - Initializes firmware.
3257 * @vha: HA context
3258 *
3259 * Beginning of request ring has initialization control block already built
3260 * by nvram config routine.
3261 *
3262 * Returns 0 on success.
3263 */
8ae6d9c7 3264int
e315cd28 3265qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
3266{
3267 int rval;
3268 unsigned long flags = 0;
29bdccbe 3269 int cnt, que;
e315cd28 3270 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
3271 struct req_que *req;
3272 struct rsp_que *rsp;
2c3dfe3f
SJ
3273 struct mid_init_cb_24xx *mid_init_cb =
3274 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
3275
3276 spin_lock_irqsave(&ha->hardware_lock, flags);
3277
3278 /* Clear outstanding commands array. */
2afa19a9 3279 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 3280 req = ha->req_q_map[que];
cb43285f 3281 if (!req || !test_bit(que, ha->req_qid_map))
29bdccbe 3282 continue;
7c6300e3
JC
3283 req->out_ptr = (void *)(req->ring + req->length);
3284 *req->out_ptr = 0;
8d93f550 3285 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 3286 req->outstanding_cmds[cnt] = NULL;
1da177e4 3287
2afa19a9 3288 req->current_outstanding_cmd = 1;
1da177e4 3289
29bdccbe
AC
3290 /* Initialize firmware. */
3291 req->ring_ptr = req->ring;
3292 req->ring_index = 0;
3293 req->cnt = req->length;
3294 }
1da177e4 3295
2afa19a9 3296 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe 3297 rsp = ha->rsp_q_map[que];
cb43285f 3298 if (!rsp || !test_bit(que, ha->rsp_qid_map))
29bdccbe 3299 continue;
7c6300e3
JC
3300 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3301 *rsp->in_ptr = 0;
29bdccbe 3302 /* Initialize response queue entries */
8ae6d9c7
GM
3303 if (IS_QLAFX00(ha))
3304 qlafx00_init_response_q_entries(rsp);
3305 else
3306 qla2x00_init_response_q_entries(rsp);
29bdccbe 3307 }
1da177e4 3308
2d70c103
NB
3309 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3310 ha->tgt.atio_ring_index = 0;
3311 /* Initialize ATIO queue entries */
3312 qlt_init_atio_q_entries(vha);
3313
e315cd28 3314 ha->isp_ops->config_rings(vha);
1da177e4
LT
3315
3316 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3317
8ae6d9c7
GM
3318 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
3319
3320 if (IS_QLAFX00(ha)) {
3321 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3322 goto next_check;
3323 }
3324
1da177e4 3325 /* Update any ISP specific firmware options before initialization. */
e315cd28 3326 ha->isp_ops->update_fw_options(vha);
1da177e4 3327
605aa2bc 3328 if (ha->flags.npiv_supported) {
45980cc2 3329 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 3330 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 3331 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
3332 }
3333
24a08138 3334 if (IS_FWI2_CAPABLE(ha)) {
ad950360 3335 mid_init_cb->options = cpu_to_le16(BIT_1);
24a08138 3336 mid_init_cb->init_cb.execution_throttle =
03e8c680 3337 cpu_to_le16(ha->cur_fw_xcb_count);
40f3862b
JC
3338 ha->flags.dport_enabled =
3339 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3340 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3341 (ha->flags.dport_enabled) ? "enabled" : "disabled");
3342 /* FA-WWPN Status */
2486c627 3343 ha->flags.fawwpn_enabled =
40f3862b 3344 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
83548fe2 3345 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
2486c627 3346 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
24a08138 3347 }
2c3dfe3f 3348
e315cd28 3349 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 3350next_check:
1da177e4 3351 if (rval) {
7c3df132
SK
3352 ql_log(ql_log_fatal, vha, 0x00d2,
3353 "Init Firmware **** FAILED ****.\n");
1da177e4 3354 } else {
7c3df132
SK
3355 ql_dbg(ql_dbg_init, vha, 0x00d3,
3356 "Init Firmware -- success.\n");
4b60c827 3357 QLA_FW_STARTED(ha);
1da177e4
LT
3358 }
3359
3360 return (rval);
3361}
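
/*
 * Before the Initialize Firmware mailbox is issued, the mid-layer init
 * control block is finalized: the NPIV vport count and execution
 * throttle are filled in for FWI2 parts, and the D-port and FA-WWPN
 * feature flags are sampled back out of firmware_options_1 so the rest
 * of the driver knows whether the firmware enabled them.
 */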
3362
3363/**
3364 * qla2x00_fw_ready() - Waits for firmware ready.
3365 * @vha: HA context
3366 *
3367 * Returns 0 on success.
3368 */
3369static int
e315cd28 3370qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
3371{
3372 int rval;
4d4df193 3373 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
3374 uint16_t min_wait; /* Minimum wait time if loop is down */
3375 uint16_t wait_time; /* Wait time if loop is coming ready */
b5a340dd 3376 uint16_t state[6];
e315cd28 3377 struct qla_hw_data *ha = vha->hw;
1da177e4 3378
8ae6d9c7
GM
3379 if (IS_QLAFX00(vha->hw))
3380 return qlafx00_fw_ready(vha);
3381
1da177e4
LT
3382 rval = QLA_SUCCESS;
3383
33461491
CD
3384 /* Time to wait for loop down */
3385 if (IS_P3P_TYPE(ha))
3386 min_wait = 30;
3387 else
3388 min_wait = 20;
1da177e4
LT
3389
3390 /*
3391 * Firmware should take at most one RATOV to login, plus 5 seconds for
3392 * our own processing.
3393 */
3394 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
3395 wait_time = min_wait;
3396 }
3397
3398 /* Min wait time if loop down */
3399 mtime = jiffies + (min_wait * HZ);
3400
3401 /* wait time before firmware ready */
3402 wtime = jiffies + (wait_time * HZ);
3403
3404 /* Wait for ISP to finish LIP */
e315cd28 3405 if (!vha->flags.init_done)
7c3df132
SK
3406 ql_log(ql_log_info, vha, 0x801e,
3407 "Waiting for LIP to complete.\n");
1da177e4
LT
3408
3409 do {
5b939038 3410 memset(state, -1, sizeof(state));
e315cd28 3411 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 3412 if (rval == QLA_SUCCESS) {
4d4df193 3413 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 3414 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 3415 }
4d4df193 3416 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
3417 ql_dbg(ql_dbg_taskm, vha, 0x801f,
3418 "fw_state=%x 84xx=%x.\n", state[0],
3419 state[2]);
4d4df193
HK
3420 if ((state[2] & FSTATE_LOGGED_IN) &&
3421 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
3422 ql_dbg(ql_dbg_taskm, vha, 0x8028,
3423 "Sending verify iocb.\n");
4d4df193
HK
3424
3425 cs84xx_time = jiffies;
e315cd28 3426 rval = qla84xx_init_chip(vha);
7c3df132
SK
3427 if (rval != QLA_SUCCESS) {
3428 ql_log(ql_log_warn,
cfb0919c 3429 vha, 0x8007,
7c3df132 3430 "Init chip failed.\n");
4d4df193 3431 break;
7c3df132 3432 }
4d4df193
HK
3433
3434 /* Add time taken to initialize. */
3435 cs84xx_time = jiffies - cs84xx_time;
3436 wtime += cs84xx_time;
3437 mtime += cs84xx_time;
cfb0919c 3438 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
3439 "Increasing wait time by %ld. "
3440 "New time %ld.\n", cs84xx_time,
3441 wtime);
4d4df193
HK
3442 }
3443 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
3444 ql_dbg(ql_dbg_taskm, vha, 0x8037,
3445 "F/W Ready - OK.\n");
1da177e4 3446
e315cd28 3447 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
3448 &ha->login_timeout, &ha->r_a_tov);
3449
3450 rval = QLA_SUCCESS;
3451 break;
3452 }
3453
3454 rval = QLA_FUNCTION_FAILED;
3455
e315cd28 3456 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 3457 state[0] != FSTATE_READY) {
1da177e4 3458 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
3459 * other than Wait for Login.
3460 */
1da177e4 3461 if (time_after_eq(jiffies, mtime)) {
7c3df132 3462 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
3463 "Cable is unplugged...\n");
3464
e315cd28 3465 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
3466 break;
3467 }
3468 }
3469 } else {
3470 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 3471 if (time_after_eq(jiffies, mtime) ||
7190575f 3472 ha->flags.isp82xx_fw_hung)
1da177e4
LT
3473 break;
3474 }
3475
3476 if (time_after_eq(jiffies, wtime))
3477 break;
3478
3479 /* Delay for a while */
3480 msleep(500);
1da177e4
LT
3481 } while (1);
3482
7c3df132 3483 ql_dbg(ql_dbg_taskm, vha, 0x803a,
b5a340dd
JC
3484 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
3485 state[1], state[2], state[3], state[4], state[5], jiffies);
1da177e4 3486
cfb0919c 3487 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
3488 ql_log(ql_log_warn, vha, 0x803b,
3489 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
3490 }
3491
3492 return (rval);
3493}
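
/*
 * Two deadlines govern the wait above: mtime (min_wait, the loop-down /
 * unplugged-cable give-up point) and wtime (the overall firmware-ready
 * budget, retry_count * login_timeout + 5 seconds, but never less than
 * min_wait).  As a purely illustrative example, retry_count = 8 and
 * login_timeout = 20 would give a 165 second budget.  Any time spent in
 * the ISP84xx verify IOCB is added to both deadlines so it does not eat
 * into the wait.
 */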
3494
3495/*
3496* qla2x00_configure_hba
3497* Setup adapter context.
3498*
3499* Input:
3500* vha = adapter state pointer.
3501*
3502* Returns:
3503* 0 = success
3504*
3505* Context:
3506* Kernel context.
3507*/
3508static int
e315cd28 3509qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
3510{
3511 int rval;
3512 uint16_t loop_id;
3513 uint16_t topo;
2c3dfe3f 3514 uint16_t sw_cap;
1da177e4
LT
3515 uint8_t al_pa;
3516 uint8_t area;
3517 uint8_t domain;
3518 char connect_type[22];
e315cd28 3519 struct qla_hw_data *ha = vha->hw;
61e1b269 3520 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
482c9dc7 3521 port_id_t id;
1da177e4
LT
3522
3523 /* Get host addresses. */
e315cd28 3524 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 3525 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 3526 if (rval != QLA_SUCCESS) {
e315cd28 3527 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 3528 IS_CNA_CAPABLE(ha) ||
33135aa2 3529 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
3530 ql_dbg(ql_dbg_disc, vha, 0x2008,
3531 "Loop is in a transition state.\n");
33135aa2 3532 } else {
7c3df132
SK
3533 ql_log(ql_log_warn, vha, 0x2009,
3534 "Unable to get host loop ID.\n");
61e1b269
JC
3535 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
3536 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
3537 ql_log(ql_log_warn, vha, 0x1151,
3538 "Doing link init.\n");
3539 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
3540 return rval;
3541 }
e315cd28 3542 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 3543 }
1da177e4
LT
3544 return (rval);
3545 }
3546
3547 if (topo == 4) {
7c3df132
SK
3548 ql_log(ql_log_info, vha, 0x200a,
3549 "Cannot get topology - retrying.\n");
1da177e4
LT
3550 return (QLA_FUNCTION_FAILED);
3551 }
3552
e315cd28 3553 vha->loop_id = loop_id;
1da177e4
LT
3554
3555 /* initialize */
3556 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
3557 ha->operating_mode = LOOP;
2c3dfe3f 3558 ha->switch_cap = 0;
1da177e4
LT
3559
3560 switch (topo) {
3561 case 0:
7c3df132 3562 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
3563 ha->current_topology = ISP_CFG_NL;
3564 strcpy(connect_type, "(Loop)");
3565 break;
3566
3567 case 1:
7c3df132 3568 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 3569 ha->switch_cap = sw_cap;
1da177e4
LT
3570 ha->current_topology = ISP_CFG_FL;
3571 strcpy(connect_type, "(FL_Port)");
3572 break;
3573
3574 case 2:
7c3df132 3575 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
3576 ha->operating_mode = P2P;
3577 ha->current_topology = ISP_CFG_N;
3578 strcpy(connect_type, "(N_Port-to-N_Port)");
3579 break;
3580
3581 case 3:
7c3df132 3582 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 3583 ha->switch_cap = sw_cap;
1da177e4
LT
3584 ha->operating_mode = P2P;
3585 ha->current_topology = ISP_CFG_F;
3586 strcpy(connect_type, "(F_Port)");
3587 break;
3588
3589 default:
7c3df132
SK
3590 ql_dbg(ql_dbg_disc, vha, 0x200f,
3591 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
3592 ha->current_topology = ISP_CFG_NL;
3593 strcpy(connect_type, "(Loop)");
3594 break;
3595 }
3596
3597 /* Save Host port and loop ID. */
3598 /* byte order - Big Endian */
482c9dc7
QT
3599 id.b.domain = domain;
3600 id.b.area = area;
3601 id.b.al_pa = al_pa;
3602 id.b.rsvd_1 = 0;
3603 qlt_update_host_map(vha, id);
2d70c103 3604
e315cd28 3605 if (!vha->flags.init_done)
7c3df132
SK
3606 ql_log(ql_log_info, vha, 0x2010,
3607 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 3608 connect_type, vha->loop_id);
1da177e4 3609
1da177e4
LT
3610 return(rval);
3611}
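/*
 * Editor's note: qla2x00_configure_hba() above rebuilds the host's 24-bit
 * FC address from the domain/area/AL_PA bytes returned by the "get adapter
 * ID" mailbox command, with the domain in the top byte and the AL_PA in the
 * bottom byte.  A standalone sketch of that packing follows; fc_id_pack()
 * and fc_id_unpack() are illustrative helpers, not driver functions.
 */
#include <stdint.h>

static inline uint32_t fc_id_pack(uint8_t domain, uint8_t area, uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}

static inline void fc_id_unpack(uint32_t b24,
				uint8_t *domain, uint8_t *area, uint8_t *al_pa)
{
	*domain = (b24 >> 16) & 0xff;
	*area   = (b24 >> 8) & 0xff;
	*al_pa  = b24 & 0xff;
}
/* Example: fc_id_pack(0x01, 0x02, 0xef) == 0x0102ef. */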
3612
a9083016 3613inline void
e315cd28
AC
3614qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
3615 char *def)
9bb9fcf2
AV
3616{
3617 char *st, *en;
3618 uint16_t index;
e315cd28 3619 struct qla_hw_data *ha = vha->hw;
ab671149 3620 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 3621 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2
AV
3622
3623 if (memcmp(model, BINZERO, len) != 0) {
3624 strncpy(ha->model_number, model, len);
3625 st = en = ha->model_number;
3626 en += len - 1;
3627 while (en > st) {
3628 if (*en != 0x20 && *en != 0x00)
3629 break;
3630 *en-- = '\0';
3631 }
3632
3633 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
3634 if (use_tbl &&
3635 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 3636 index < QLA_MODEL_NAMES)
1ee27146
JC
3637 strncpy(ha->model_desc,
3638 qla2x00_model_name[index * 2 + 1],
3639 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
3640 } else {
3641 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
3642 if (use_tbl &&
3643 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
3644 index < QLA_MODEL_NAMES) {
3645 strcpy(ha->model_number,
3646 qla2x00_model_name[index * 2]);
1ee27146
JC
3647 strncpy(ha->model_desc,
3648 qla2x00_model_name[index * 2 + 1],
3649 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
3650 } else {
3651 strcpy(ha->model_number, def);
3652 }
3653 }
1ee27146 3654 if (IS_FWI2_CAPABLE(ha))
e315cd28 3655 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 3656 sizeof(ha->model_desc));
9bb9fcf2
AV
3657}
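/*
 * Editor's note: qla2x00_set_model_info() above copies a fixed-width,
 * space-padded model field and then walks backwards trimming trailing
 * spaces and NULs.  A minimal standalone version of that right-trim is
 * sketched below; rtrim_fixed() is an illustrative helper, not driver code,
 * and dst must have room for len + 1 bytes.
 */
#include <stddef.h>
#include <string.h>

static void rtrim_fixed(char *dst, const char *src, size_t len)
{
	char *en;

	memcpy(dst, src, len);
	dst[len] = '\0';

	/* Trim trailing padding, stopping at the first real character. */
	for (en = dst + len - 1; en > dst; en--) {
		if (*en != ' ' && *en != '\0')
			break;
		*en = '\0';
	}
}
/* rtrim_fixed(buf, "QLA2462   ", 10) leaves buf holding "QLA2462". */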
3658
4e08df3f
DM
3659/* On sparc systems, obtain port and node WWN from firmware
3660 * properties.
3661 */
e315cd28 3662static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
3663{
3664#ifdef CONFIG_SPARC
e315cd28 3665 struct qla_hw_data *ha = vha->hw;
4e08df3f 3666 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
3667 struct device_node *dp = pci_device_to_OF_node(pdev);
3668 const u8 *val;
4e08df3f
DM
3669 int len;
3670
3671 val = of_get_property(dp, "port-wwn", &len);
3672 if (val && len >= WWN_SIZE)
3673 memcpy(nv->port_name, val, WWN_SIZE);
3674
3675 val = of_get_property(dp, "node-wwn", &len);
3676 if (val && len >= WWN_SIZE)
3677 memcpy(nv->node_name, val, WWN_SIZE);
3678#endif
3679}
3680
1da177e4
LT
3681/*
3682* NVRAM configuration for ISP 2xxx
3683*
3684* Input:
3685* ha = adapter block pointer.
3686*
3687* Output:
3688* initialization control block in response_ring
3689* host adapters parameters in host adapter block
3690*
3691* Returns:
3692* 0 = success.
3693*/
abbd8870 3694int
e315cd28 3695qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 3696{
4e08df3f 3697 int rval;
0107109e
AV
3698 uint8_t chksum = 0;
3699 uint16_t cnt;
3700 uint8_t *dptr1, *dptr2;
e315cd28 3701 struct qla_hw_data *ha = vha->hw;
0107109e 3702 init_cb_t *icb = ha->init_cb;
281afe19
SJ
3703 nvram_t *nv = ha->nvram;
3704 uint8_t *ptr = ha->nvram;
3d71644c 3705 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 3706
4e08df3f
DM
3707 rval = QLA_SUCCESS;
3708
1da177e4 3709 /* Determine NVRAM starting address. */
0107109e 3710 ha->nvram_size = sizeof(nvram_t);
1da177e4
LT
3711 ha->nvram_base = 0;
3712 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
3713 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
3714 ha->nvram_base = 0x80;
3715
3716 /* Get NVRAM data and calculate checksum. */
e315cd28 3717 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
3718 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
3719 chksum += *ptr++;
1da177e4 3720
7c3df132
SK
3721 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
3722 "Contents of NVRAM.\n");
3723 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
3724 (uint8_t *)nv, ha->nvram_size);
1da177e4
LT
3725
3726 /* Bad NVRAM data, set defaults parameters. */
3727 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
3728 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
3729 /* Reset NVRAM data. */
7c3df132 3730 ql_log(ql_log_warn, vha, 0x0064,
9e336520 3731 "Inconsistent NVRAM "
7c3df132
SK
3732 "detected: checksum=0x%x id=%c version=0x%x.\n",
3733 chksum, nv->id[0], nv->nvram_version);
3734 ql_log(ql_log_warn, vha, 0x0065,
3735 "Falling back to "
3736 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
3737
3738 /*
3739 * Set default initialization control block.
3740 */
3741 memset(nv, 0, ha->nvram_size);
3742 nv->parameter_block_version = ICB_VERSION;
3743
3744 if (IS_QLA23XX(ha)) {
3745 nv->firmware_options[0] = BIT_2 | BIT_1;
3746 nv->firmware_options[1] = BIT_7 | BIT_5;
3747 nv->add_firmware_options[0] = BIT_5;
3748 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 3749 nv->frame_payload_size = 2048;
4e08df3f
DM
3750 nv->special_options[1] = BIT_7;
3751 } else if (IS_QLA2200(ha)) {
3752 nv->firmware_options[0] = BIT_2 | BIT_1;
3753 nv->firmware_options[1] = BIT_7 | BIT_5;
3754 nv->add_firmware_options[0] = BIT_5;
3755 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 3756 nv->frame_payload_size = 1024;
4e08df3f
DM
3757 } else if (IS_QLA2100(ha)) {
3758 nv->firmware_options[0] = BIT_3 | BIT_1;
3759 nv->firmware_options[1] = BIT_5;
98aee70d 3760 nv->frame_payload_size = 1024;
4e08df3f
DM
3761 }
3762
ad950360
BVA
3763 nv->max_iocb_allocation = cpu_to_le16(256);
3764 nv->execution_throttle = cpu_to_le16(16);
4e08df3f
DM
3765 nv->retry_count = 8;
3766 nv->retry_delay = 1;
3767
3768 nv->port_name[0] = 33;
3769 nv->port_name[3] = 224;
3770 nv->port_name[4] = 139;
3771
e315cd28 3772 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
3773
3774 nv->login_timeout = 4;
3775
3776 /*
3777 * Set default host adapter parameters
3778 */
3779 nv->host_p[1] = BIT_2;
3780 nv->reset_delay = 5;
3781 nv->port_down_retry_count = 8;
ad950360 3782 nv->max_luns_per_target = cpu_to_le16(8);
4e08df3f
DM
3783 nv->link_down_timeout = 60;
3784
3785 rval = 1;
1da177e4
LT
3786 }
3787
3788#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
3789 /*
3790 * The SN2 does not provide BIOS emulation which means you can't change
3791 * potentially bogus BIOS settings. Force the use of default settings
3792 * for link rate and frame size. Hope that the rest of the settings
3793 * are valid.
3794 */
3795 if (ia64_platform_is("sn2")) {
98aee70d 3796 nv->frame_payload_size = 2048;
1da177e4
LT
3797 if (IS_QLA23XX(ha))
3798 nv->special_options[1] = BIT_7;
3799 }
3800#endif
3801
3802 /* Reset Initialization control block */
0107109e 3803 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
3804
3805 /*
3806 * Setup driver NVRAM options.
3807 */
3808 nv->firmware_options[0] |= (BIT_6 | BIT_1);
3809 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
3810 nv->firmware_options[1] |= (BIT_5 | BIT_0);
3811 nv->firmware_options[1] &= ~BIT_4;
3812
3813 if (IS_QLA23XX(ha)) {
3814 nv->firmware_options[0] |= BIT_2;
3815 nv->firmware_options[0] &= ~BIT_3;
2d70c103 3816 nv->special_options[0] &= ~BIT_6;
0107109e 3817 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
3818
3819 if (IS_QLA2300(ha)) {
3820 if (ha->fb_rev == FPM_2310) {
3821 strcpy(ha->model_number, "QLA2310");
3822 } else {
3823 strcpy(ha->model_number, "QLA2300");
3824 }
3825 } else {
e315cd28 3826 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 3827 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
3828 }
3829 } else if (IS_QLA2200(ha)) {
3830 nv->firmware_options[0] |= BIT_2;
3831 /*
3832 * 'Point-to-point preferred, else loop' is not a safe
3833 * connection mode setting.
3834 */
3835 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
3836 (BIT_5 | BIT_4)) {
3837 /* Force 'loop preferred, else point-to-point'. */
3838 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
3839 nv->add_firmware_options[0] |= BIT_5;
3840 }
3841 strcpy(ha->model_number, "QLA22xx");
3842 } else /*if (IS_QLA2100(ha))*/ {
3843 strcpy(ha->model_number, "QLA2100");
3844 }
3845
3846 /*
3847 * Copy over NVRAM RISC parameter block to initialization control block.
3848 */
3849 dptr1 = (uint8_t *)icb;
3850 dptr2 = (uint8_t *)&nv->parameter_block_version;
3851 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
3852 while (cnt--)
3853 *dptr1++ = *dptr2++;
3854
3855 /* Copy 2nd half. */
3856 dptr1 = (uint8_t *)icb->add_firmware_options;
3857 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
3858 while (cnt--)
3859 *dptr1++ = *dptr2++;
3860
5341e868
AV
3861 /* Use alternate WWN? */
3862 if (nv->host_p[1] & BIT_7) {
3863 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
3864 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
3865 }
3866
1da177e4
LT
3867 /* Prepare nodename */
3868 if ((icb->firmware_options[1] & BIT_6) == 0) {
3869 /*
3870 * Firmware will apply the following mask if the nodename was
3871 * not provided.
3872 */
3873 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
3874 icb->node_name[0] &= 0xF0;
3875 }
3876
3877 /*
3878 * Set host adapter parameters.
3879 */
3ce8866c
SK
3880
3881 /*
3882 * BIT_7 in the host-parameters section allows for modification to
3883 * internal driver logging.
3884 */
0181944f 3885 if (nv->host_p[0] & BIT_7)
cfb0919c 3886 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
3887 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
3888 /* Always load RISC code on non ISP2[12]00 chips. */
3889 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
3890 ha->flags.disable_risc_code_load = 0;
3891 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
3892 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
3893 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 3894 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 3895 ha->flags.disable_serdes = 0;
1da177e4
LT
3896
3897 ha->operating_mode =
3898 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
3899
3900 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
3901 sizeof(ha->fw_seriallink_options));
3902
3903 /* save HBA serial number */
3904 ha->serial0 = icb->port_name[5];
3905 ha->serial1 = icb->port_name[6];
3906 ha->serial2 = icb->port_name[7];
e315cd28
AC
3907 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3908 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4 3909
ad950360 3910 icb->execution_throttle = cpu_to_le16(0xFFFF);
1da177e4
LT
3911
3912 ha->retry_count = nv->retry_count;
3913
3914 /* Set minimum login_timeout to 4 seconds. */
5b91490e 3915 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
3916 nv->login_timeout = ql2xlogintimeout;
3917 if (nv->login_timeout < 4)
3918 nv->login_timeout = 4;
3919 ha->login_timeout = nv->login_timeout;
1da177e4 3920
00a537b8
AV
3921 /* Set minimum RATOV to 100 tenths of a second. */
3922 ha->r_a_tov = 100;
1da177e4 3923
1da177e4
LT
3924 ha->loop_reset_delay = nv->reset_delay;
3925
1da177e4
LT
3926 /* Link Down Timeout = 0:
3927 *
3928 * When Port Down timer expires we will start returning
3929 * I/O's to OS with "DID_NO_CONNECT".
3930 *
3931 * Link Down Timeout != 0:
3932 *
3933 * The driver waits for the link to come up after link down
3934 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 3935 */
1da177e4
LT
3936 if (nv->link_down_timeout == 0) {
3937 ha->loop_down_abort_time =
354d6b21 3938 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
3939 } else {
3940 ha->link_down_timeout = nv->link_down_timeout;
3941 ha->loop_down_abort_time =
3942 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 3943 }
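		/*
		 * Editor's note (worked example, values assumed for
		 * illustration): with LOOP_DOWN_TIME = 255 and an NVRAM
		 * link_down_timeout of 30, loop_down_abort_time becomes
		 * 255 - 30 = 225 ticks; with link_down_timeout = 0 it is
		 * 255 - LOOP_DOWN_TIMEOUT instead, and I/O is failed with
		 * DID_NO_CONNECT as soon as the port-down timer expires.
		 */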
1da177e4 3944
1da177e4
LT
3945 /*
3946 * Need enough time to try and get the port back.
3947 */
3948 ha->port_down_retry_count = nv->port_down_retry_count;
3949 if (qlport_down_retry)
3950 ha->port_down_retry_count = qlport_down_retry;
3951 /* Set login_retry_count */
3952 ha->login_retry_count = nv->retry_count;
3953 if (ha->port_down_retry_count == nv->port_down_retry_count &&
3954 ha->port_down_retry_count > 3)
3955 ha->login_retry_count = ha->port_down_retry_count;
3956 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
3957 ha->login_retry_count = ha->port_down_retry_count;
3958 if (ql2xloginretrycount)
3959 ha->login_retry_count = ql2xloginretrycount;
3960
ad950360 3961 icb->lun_enables = cpu_to_le16(0);
1da177e4
LT
3962 icb->command_resource_count = 0;
3963 icb->immediate_notify_resource_count = 0;
ad950360 3964 icb->timeout = cpu_to_le16(0);
1da177e4
LT
3965
3966 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3967 /* Enable RIO */
3968 icb->firmware_options[0] &= ~BIT_3;
3969 icb->add_firmware_options[0] &=
3970 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
3971 icb->add_firmware_options[0] |= BIT_2;
3972 icb->response_accumulation_timer = 3;
3973 icb->interrupt_delay_timer = 5;
3974
e315cd28 3975 vha->flags.process_response_queue = 1;
1da177e4 3976 } else {
4fdfefe5 3977 /* Enable ZIO. */
e315cd28 3978 if (!vha->flags.init_done) {
4fdfefe5
AV
3979 ha->zio_mode = icb->add_firmware_options[0] &
3980 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3981 ha->zio_timer = icb->interrupt_delay_timer ?
3982 icb->interrupt_delay_timer: 2;
3983 }
1da177e4
LT
3984 icb->add_firmware_options[0] &=
3985 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 3986 vha->flags.process_response_queue = 0;
4fdfefe5 3987 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 3988 ha->zio_mode = QLA_ZIO_MODE_6;
3989
7c3df132 3990 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
3991 "ZIO mode %d enabled; timer delay (%d us).\n",
3992 ha->zio_mode, ha->zio_timer * 100);
1da177e4 3993
4fdfefe5
AV
3994 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
3995 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 3996 vha->flags.process_response_queue = 1;
1da177e4
LT
3997 }
3998 }
3999
4e08df3f 4000 if (rval) {
7c3df132
SK
4001 ql_log(ql_log_warn, vha, 0x0069,
4002 "NVRAM configuration failed.\n");
4e08df3f
DM
4003 }
4004 return (rval);
1da177e4
LT
4005}
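/*
 * Editor's note: qla2x00_nvram_config() above validates the NVRAM image by
 * summing every byte into an 8-bit accumulator and treating the image as bad
 * unless the sum wraps to zero (equivalently, the stored checksum byte is
 * the negation of the sum of all other bytes).  A standalone sketch of that
 * check follows; nvram_sum_is_zero() is an illustrative helper, not a driver
 * function.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool nvram_sum_is_zero(const uint8_t *buf, size_t len)
{
	uint8_t chksum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		chksum += buf[i];       /* 8-bit wrap-around addition */

	return chksum == 0;             /* zero sum <=> checksum is valid */
}
/* Example: {0x10, 0x20, 0xd0} sums to 0x100, which wraps to 0, so it passes. */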
4006
19a7b4ae
JSEC
4007static void
4008qla2x00_rport_del(void *data)
4009{
4010 fc_port_t *fcport = data;
d97994dc 4011 struct fc_rport *rport;
044d78e1 4012 unsigned long flags;
d97994dc 4013
044d78e1 4014 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
ac280b67 4015 rport = fcport->drport ? fcport->drport: fcport->rport;
d97994dc 4016 fcport->drport = NULL;
044d78e1 4017 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
726b8548 4018 if (rport) {
83548fe2
QT
4019 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4020 "%s %8phN. rport %p roles %x\n",
4021 __func__, fcport->port_name, rport,
4022 rport->roles);
726b8548 4023
d97994dc 4024 fc_remote_port_delete(rport);
726b8548 4025 }
19a7b4ae
JSEC
4026}
4027
1da177e4
LT
4028/**
4029 * qla2x00_alloc_fcport() - Allocate a generic fcport.
4030 * @ha: HA context
4031 * @flags: allocation flags
4032 *
4033 * Returns a pointer to the allocated fcport, or NULL, if none available.
4034 */
9a069e19 4035fc_port_t *
e315cd28 4036qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
4037{
4038 fc_port_t *fcport;
4039
bbfbbbc1
MK
4040 fcport = kzalloc(sizeof(fc_port_t), flags);
4041 if (!fcport)
4042 return NULL;
1da177e4
LT
4043
4044 /* Setup fcport template structure. */
e315cd28 4045 fcport->vha = vha;
1da177e4
LT
4046 fcport->port_type = FCT_UNKNOWN;
4047 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 4048 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 4049 fcport->supported_classes = FC_COS_UNSPECIFIED;
1da177e4 4050
726b8548
QT
4051 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4052 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
6cb3216a 4053 flags);
726b8548
QT
4054 fcport->disc_state = DSC_DELETED;
4055 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4056 fcport->deleted = QLA_SESS_DELETED;
4057 fcport->login_retry = vha->hw->login_retry_count;
4058 fcport->login_retry = 5;
4059 fcport->logout_on_delete = 1;
4060
4061 if (!fcport->ct_desc.ct_sns) {
83548fe2 4062 ql_log(ql_log_warn, vha, 0xd049,
726b8548
QT
4063 "Failed to allocate ct_sns request.\n");
4064 kfree(fcport);
 4065 		return NULL;
4066 }
4067 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4068 INIT_LIST_HEAD(&fcport->gnl_entry);
4069 INIT_LIST_HEAD(&fcport->list);
4070
bbfbbbc1 4071 return fcport;
1da177e4
LT
4072}
4073
726b8548
QT
4074void
4075qla2x00_free_fcport(fc_port_t *fcport)
4076{
4077 if (fcport->ct_desc.ct_sns) {
4078 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4079 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4080 fcport->ct_desc.ct_sns_dma);
4081
4082 fcport->ct_desc.ct_sns = NULL;
4083 }
4084 kfree(fcport);
4085}
4086
1da177e4
LT
4087/*
4088 * qla2x00_configure_loop
4089 * Updates Fibre Channel Device Database with what is actually on loop.
4090 *
4091 * Input:
4092 * ha = adapter block pointer.
4093 *
4094 * Returns:
4095 * 0 = success.
4096 * 1 = error.
4097 * 2 = database was full and device was not configured.
4098 */
4099static int
e315cd28 4100qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
4101{
4102 int rval;
4103 unsigned long flags, save_flags;
e315cd28 4104 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
4105 rval = QLA_SUCCESS;
4106
4107 /* Get Initiator ID */
e315cd28
AC
4108 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
4109 rval = qla2x00_configure_hba(vha);
1da177e4 4110 if (rval != QLA_SUCCESS) {
7c3df132
SK
4111 ql_dbg(ql_dbg_disc, vha, 0x2013,
4112 "Unable to configure HBA.\n");
1da177e4
LT
4113 return (rval);
4114 }
4115 }
4116
e315cd28 4117 save_flags = flags = vha->dpc_flags;
7c3df132
SK
4118 ql_dbg(ql_dbg_disc, vha, 0x2014,
4119 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
4120
4121 /*
4122 * If we have both an RSCN and PORT UPDATE pending then handle them
4123 * both at the same time.
4124 */
e315cd28
AC
4125 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4126 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 4127
3064ff39
MH
4128 qla2x00_get_data_rate(vha);
4129
1da177e4
LT
4130 /* Determine what we need to do */
4131 if (ha->current_topology == ISP_CFG_FL &&
4132 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4133
1da177e4
LT
4134 set_bit(RSCN_UPDATE, &flags);
4135
4136 } else if (ha->current_topology == ISP_CFG_F &&
4137 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4138
1da177e4
LT
4139 set_bit(RSCN_UPDATE, &flags);
4140 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
4141
4142 } else if (ha->current_topology == ISP_CFG_N) {
4143 clear_bit(RSCN_UPDATE, &flags);
41dc529a
QT
4144 } else if (ha->current_topology == ISP_CFG_NL) {
4145 clear_bit(RSCN_UPDATE, &flags);
4146 set_bit(LOCAL_LOOP_UPDATE, &flags);
e315cd28 4147 } else if (!vha->flags.online ||
1da177e4 4148 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1da177e4
LT
4149 set_bit(RSCN_UPDATE, &flags);
4150 set_bit(LOCAL_LOOP_UPDATE, &flags);
4151 }
4152
4153 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
4154 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
4155 ql_dbg(ql_dbg_disc, vha, 0x2015,
4156 "Loop resync needed, failing.\n");
1da177e4 4157 rval = QLA_FUNCTION_FAILED;
642ef983 4158 } else
e315cd28 4159 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
4160 }
4161
4162 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132 4163 if (LOOP_TRANSITION(vha)) {
83548fe2 4164 ql_dbg(ql_dbg_disc, vha, 0x2099,
7c3df132 4165 "Needs RSCN update and loop transition.\n");
1da177e4 4166 rval = QLA_FUNCTION_FAILED;
7c3df132 4167 }
e315cd28
AC
4168 else
4169 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
4170 }
4171
4172 if (rval == QLA_SUCCESS) {
e315cd28
AC
4173 if (atomic_read(&vha->loop_down_timer) ||
4174 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
4175 rval = QLA_FUNCTION_FAILED;
4176 } else {
e315cd28 4177 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
4178 ql_dbg(ql_dbg_disc, vha, 0x2069,
4179 "LOOP READY.\n");
ec7193e2 4180 ha->flags.fw_init_done = 1;
3bb67df5
DKU
4181
4182 /*
4183 * Process any ATIO queue entries that came in
4184 * while we weren't online.
4185 */
ead03855
QT
4186 if (qla_tgt_mode_enabled(vha) ||
4187 qla_dual_mode_enabled(vha)) {
3bb67df5
DKU
4188 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
4189 spin_lock_irqsave(&ha->tgt.atio_lock,
4190 flags);
4191 qlt_24xx_process_atio_queue(vha, 0);
4192 spin_unlock_irqrestore(
4193 &ha->tgt.atio_lock, flags);
4194 } else {
4195 spin_lock_irqsave(&ha->hardware_lock,
4196 flags);
4197 qlt_24xx_process_atio_queue(vha, 1);
4198 spin_unlock_irqrestore(
4199 &ha->hardware_lock, flags);
4200 }
4201 }
1da177e4
LT
4202 }
4203 }
4204
4205 if (rval) {
7c3df132
SK
4206 ql_dbg(ql_dbg_disc, vha, 0x206a,
4207 "%s *** FAILED ***.\n", __func__);
1da177e4 4208 } else {
7c3df132
SK
4209 ql_dbg(ql_dbg_disc, vha, 0x206b,
4210 "%s: exiting normally.\n", __func__);
1da177e4
LT
4211 }
4212
cc3ef7bc 4213 /* Restore state if a resync event occurred during processing */
e315cd28 4214 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 4215 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 4216 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 4217 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 4218 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 4219 }
1da177e4
LT
4220 }
4221
4222 return (rval);
4223}
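/*
 * Editor's note: qla2x00_configure_loop() snapshots vha->dpc_flags into
 * save_flags, clears the LOCAL_LOOP_UPDATE / RSCN_UPDATE bits it is about to
 * service, works from the snapshot, and re-asserts those bits only if a
 * LOOP_RESYNC_NEEDED event arrived while it was busy.  The standalone sketch
 * below shows that snapshot/clear/restore idea with plain bit masks; the
 * names are illustrative and the driver itself uses the kernel's atomic
 * bitops rather than this simplified form.
 */
#define F_LOCAL_LOOP_UPDATE  (1u << 0)
#define F_RSCN_UPDATE        (1u << 1)
#define F_LOOP_RESYNC_NEEDED (1u << 2)

static void service_updates(unsigned int *dpc_flags,
			    void (*do_work)(unsigned int pending))
{
	unsigned int saved = *dpc_flags;        /* snapshot of pending work */

	/* Claim the update bits we are about to service. */
	*dpc_flags &= ~(F_LOCAL_LOOP_UPDATE | F_RSCN_UPDATE);

	do_work(saved);

	/*
	 * A resync arrived mid-way: put the claimed bits back so the next
	 * DPC pass repeats the update and nothing is lost.
	 */
	if (*dpc_flags & F_LOOP_RESYNC_NEEDED)
		*dpc_flags |= saved &
			(F_LOCAL_LOOP_UPDATE | F_RSCN_UPDATE);
}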
4224
4225
4226
4227/*
4228 * qla2x00_configure_local_loop
4229 * Updates Fibre Channel Device Database with local loop devices.
4230 *
4231 * Input:
4232 * ha = adapter block pointer.
4233 *
4234 * Returns:
4235 * 0 = success.
4236 */
4237static int
e315cd28 4238qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
4239{
4240 int rval, rval2;
4241 int found_devs;
4242 int found;
4243 fc_port_t *fcport, *new_fcport;
4244
4245 uint16_t index;
4246 uint16_t entries;
4247 char *id_iter;
4248 uint16_t loop_id;
4249 uint8_t domain, area, al_pa;
e315cd28 4250 struct qla_hw_data *ha = vha->hw;
41dc529a 4251 unsigned long flags;
1da177e4
LT
4252
4253 found_devs = 0;
4254 new_fcport = NULL;
642ef983 4255 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 4256
1da177e4 4257 /* Get list of logged in devices. */
642ef983 4258 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 4259 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
4260 &entries);
4261 if (rval != QLA_SUCCESS)
4262 goto cleanup_allocation;
4263
83548fe2 4264 ql_dbg(ql_dbg_disc, vha, 0x2011,
7c3df132
SK
4265 "Entries in ID list (%d).\n", entries);
4266 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
4267 (uint8_t *)ha->gid_list,
4268 entries * sizeof(struct gid_list_info));
1da177e4
LT
4269
4270 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4271 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4272 if (new_fcport == NULL) {
83548fe2 4273 ql_log(ql_log_warn, vha, 0x2012,
7c3df132 4274 "Memory allocation failed for fcport.\n");
1da177e4
LT
4275 rval = QLA_MEMORY_ALLOC_FAILED;
4276 goto cleanup_allocation;
4277 }
4278 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4279
4280 /*
4281 * Mark local devices that were present with FCF_DEVICE_LOST for now.
4282 */
e315cd28 4283 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4284 if (atomic_read(&fcport->state) == FCS_ONLINE &&
4285 fcport->port_type != FCT_BROADCAST &&
4286 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4287
83548fe2 4288 ql_dbg(ql_dbg_disc, vha, 0x2096,
7c3df132
SK
4289 "Marking port lost loop_id=0x%04x.\n",
4290 fcport->loop_id);
1da177e4 4291
41dc529a 4292 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1da177e4
LT
4293 }
4294 }
4295
4296 /* Add devices to port list. */
4297 id_iter = (char *)ha->gid_list;
4298 for (index = 0; index < entries; index++) {
4299 domain = ((struct gid_list_info *)id_iter)->domain;
4300 area = ((struct gid_list_info *)id_iter)->area;
4301 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 4302 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
4303 loop_id = (uint16_t)
4304 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 4305 else
1da177e4
LT
4306 loop_id = le16_to_cpu(
4307 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 4308 id_iter += ha->gid_list_info_size;
1da177e4
LT
4309
4310 /* Bypass reserved domain fields. */
4311 if ((domain & 0xf0) == 0xf0)
4312 continue;
4313
4314 /* Bypass if not same domain and area of adapter. */
f7d289f6 4315 if (area && domain &&
e315cd28 4316 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
1da177e4
LT
4317 continue;
4318
4319 /* Bypass invalid local loop ID. */
4320 if (loop_id > LAST_LOCAL_LOOP_ID)
4321 continue;
4322
41dc529a 4323 memset(new_fcport->port_name, 0, WWN_SIZE);
370d550e 4324
1da177e4
LT
4325 /* Fill in member data. */
4326 new_fcport->d_id.b.domain = domain;
4327 new_fcport->d_id.b.area = area;
4328 new_fcport->d_id.b.al_pa = al_pa;
4329 new_fcport->loop_id = loop_id;
41dc529a 4330
e315cd28 4331 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 4332 if (rval2 != QLA_SUCCESS) {
83548fe2 4333 ql_dbg(ql_dbg_disc, vha, 0x2097,
7c3df132
SK
4334 "Failed to retrieve fcport information "
4335 "-- get_port_database=%x, loop_id=0x%04x.\n",
4336 rval2, new_fcport->loop_id);
83548fe2 4337 ql_dbg(ql_dbg_disc, vha, 0x2105,
7c3df132 4338 "Scheduling resync.\n");
e315cd28 4339 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
4340 continue;
4341 }
4342
41dc529a 4343 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
4344 /* Check for matching device in port list. */
4345 found = 0;
4346 fcport = NULL;
e315cd28 4347 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4348 if (memcmp(new_fcport->port_name, fcport->port_name,
4349 WWN_SIZE))
4350 continue;
4351
ddb9b126 4352 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
4353 fcport->loop_id = new_fcport->loop_id;
4354 fcport->port_type = new_fcport->port_type;
4355 fcport->d_id.b24 = new_fcport->d_id.b24;
4356 memcpy(fcport->node_name, new_fcport->node_name,
4357 WWN_SIZE);
4358
41dc529a
QT
4359 if (!fcport->login_succ) {
4360 vha->fcport_count++;
4361 fcport->login_succ = 1;
4362 fcport->disc_state = DSC_LOGIN_COMPLETE;
4363 }
4364
1da177e4
LT
4365 found++;
4366 break;
4367 }
4368
4369 if (!found) {
4370 /* New device, add to fcports list. */
e315cd28 4371 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
4372
4373 /* Allocate a new replacement fcport. */
4374 fcport = new_fcport;
41dc529a
QT
4375 if (!fcport->login_succ) {
4376 vha->fcport_count++;
4377 fcport->login_succ = 1;
4378 fcport->disc_state = DSC_LOGIN_COMPLETE;
4379 }
4380
4381 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4382
e315cd28 4383 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
41dc529a 4384
1da177e4 4385 if (new_fcport == NULL) {
83548fe2 4386 ql_log(ql_log_warn, vha, 0xd031,
7c3df132 4387 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4388 rval = QLA_MEMORY_ALLOC_FAILED;
4389 goto cleanup_allocation;
4390 }
41dc529a 4391 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
4392 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4393 }
4394
41dc529a
QT
4395 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4396
d8b45213 4397 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 4398 fcport->fp_speed = ha->link_data_rate;
d8b45213 4399
e315cd28 4400 qla2x00_update_fcport(vha, fcport);
1da177e4
LT
4401
4402 found_devs++;
4403 }
4404
4405cleanup_allocation:
c9475cb0 4406 kfree(new_fcport);
1da177e4
LT
4407
4408 if (rval != QLA_SUCCESS) {
83548fe2 4409 ql_dbg(ql_dbg_disc, vha, 0x2098,
7c3df132 4410 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
4411 }
4412
1da177e4
LT
4413 return (rval);
4414}
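/*
 * Editor's note: the local-loop scan above treats the firmware's ID list as
 * a byte buffer and advances by ha->gid_list_info_size per entry, because
 * the entry layout (and the width of the loop ID) differs between
 * ISP21xx/22xx and later chips; entries in reserved domains (upper nibble
 * 0xf) or with out-of-range loop IDs are skipped.  The sketch below shows
 * the same stride walk; the assumed byte layout and the helper names are
 * for illustration only.
 */
#include <stddef.h>
#include <stdint.h>

static size_t walk_id_list(const uint8_t *buf, size_t entries, size_t stride,
			   void (*visit)(uint8_t domain, uint8_t area,
					 uint8_t al_pa))
{
	size_t i, visited = 0;

	for (i = 0; i < entries; i++, buf += stride) {
		uint8_t al_pa  = buf[0];     /* assumed layout: AL_PA, */
		uint8_t area   = buf[1];     /* then area,             */
		uint8_t domain = buf[2];     /* then domain            */

		if ((domain & 0xf0) == 0xf0) /* reserved FC domain     */
			continue;

		visit(domain, area, al_pa);
		visited++;
	}
	return visited;
}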
4415
d8b45213 4416static void
e315cd28 4417qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 4418{
d8b45213 4419 int rval;
93f2bd67 4420 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 4421 struct qla_hw_data *ha = vha->hw;
d8b45213 4422
c76f2c01 4423 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
4424 return;
4425
c9afb9a2
GM
4426 if (atomic_read(&fcport->state) != FCS_ONLINE)
4427 return;
4428
39bd9622
AV
4429 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
4430 fcport->fp_speed > ha->link_data_rate)
d8b45213
AV
4431 return;
4432
e315cd28 4433 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 4434 mb);
d8b45213 4435 if (rval != QLA_SUCCESS) {
7c3df132 4436 ql_dbg(ql_dbg_disc, vha, 0x2004,
7b833558
OK
4437 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
4438 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
d8b45213 4439 } else {
7c3df132 4440 ql_dbg(ql_dbg_disc, vha, 0x2005,
7b833558 4441 "iIDMA adjusted to %s GB/s on %8phN.\n",
d0297c9a 4442 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
7b833558 4443 fcport->port_name);
d8b45213
AV
4444 }
4445}
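/*
 * Editor's note: qla2x00_iidma_fcport() above only programs a per-port
 * (iIDMA) speed when the fabric reported a known speed that does not exceed
 * the HBA's negotiated link rate; otherwise the port is left at the link
 * rate.  A tiny standalone sketch of that clamp decision follows; the
 * sentinel value and helper name are assumptions for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_SPEED_UNKNOWN 0xffff   /* assumed "unknown speed" sentinel */

static bool should_set_port_speed(uint16_t port_speed, uint16_t link_rate)
{
	if (port_speed == EXAMPLE_SPEED_UNKNOWN)
		return false;          /* fabric gave us nothing usable */
	if (port_speed > link_rate)
		return false;          /* never program above the link rate */
	return true;
}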
4446
726b8548 4447/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
23be331d 4448static void
e315cd28 4449qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118 4450{
4451 struct fc_rport_identifiers rport_ids;
bdf79621 4452 struct fc_rport *rport;
044d78e1 4453 unsigned long flags;
8482e118 4454
f8b02a85
AV
4455 rport_ids.node_name = wwn_to_u64(fcport->node_name);
4456 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118 4457 rport_ids.port_id = fcport->d_id.b.domain << 16 |
4458 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 4459 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 4460 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 4461 if (!rport) {
7c3df132
SK
4462 ql_log(ql_log_warn, vha, 0x2006,
4463 "Unable to allocate fc remote port.\n");
77d74143
AV
4464 return;
4465 }
2d70c103 4466
044d78e1 4467 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 4468 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 4469 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 4470
ad3e0eda 4471 rport->supported_classes = fcport->supported_classes;
77d74143 4472
8482e118 4473 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4474 if (fcport->port_type == FCT_INITIATOR)
4475 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
4476 if (fcport->port_type == FCT_TARGET)
4477 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
726b8548 4478
83548fe2
QT
4479 ql_dbg(ql_dbg_disc, vha, 0x20ee,
4480 "%s %8phN. rport %p is %s mode\n",
4481 __func__, fcport->port_name, rport,
4482 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
726b8548 4483
77d74143 4484 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
4485}
4486
23be331d
AB
4487/*
4488 * qla2x00_update_fcport
4489 * Updates device on list.
4490 *
4491 * Input:
4492 * ha = adapter block pointer.
4493 * fcport = port structure pointer.
4494 *
4495 * Return:
4496 * 0 - Success
4497 * BIT_0 - error
4498 *
4499 * Context:
4500 * Kernel context.
4501 */
4502void
e315cd28 4503qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 4504{
e315cd28 4505 fcport->vha = vha;
8ae6d9c7 4506
726b8548
QT
4507 if (IS_SW_RESV_ADDR(fcport->d_id))
4508 return;
4509
83548fe2 4510 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
726b8548
QT
4511 __func__, fcport->port_name);
4512
8ae6d9c7
GM
4513 if (IS_QLAFX00(vha->hw)) {
4514 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
d20ed91b 4515 goto reg_port;
8ae6d9c7 4516 }
23be331d 4517 fcport->login_retry = 0;
5ff1d584 4518 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
726b8548
QT
4519 fcport->disc_state = DSC_LOGIN_COMPLETE;
4520 fcport->deleted = 0;
4521 fcport->logout_on_delete = 1;
23be331d 4522
1f93da52 4523 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e315cd28 4524 qla2x00_iidma_fcport(vha, fcport);
21090cbe 4525 qla24xx_update_fcport_fcp_prio(vha, fcport);
d20ed91b
AP
4526
4527reg_port:
726b8548
QT
4528 switch (vha->host->active_mode) {
4529 case MODE_INITIATOR:
d20ed91b 4530 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
4531 break;
4532 case MODE_TARGET:
4533 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4534 !vha->vha_tgt.qla_tgt->tgt_stopped)
4535 qlt_fc_port_added(vha, fcport);
4536 break;
4537 case MODE_DUAL:
d20ed91b 4538 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
4539 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4540 !vha->vha_tgt.qla_tgt->tgt_stopped)
4541 qlt_fc_port_added(vha, fcport);
4542 break;
4543 default:
4544 break;
d20ed91b 4545 }
23be331d
AB
4546}
4547
1da177e4
LT
4548/*
4549 * qla2x00_configure_fabric
4550 * Setup SNS devices with loop ID's.
4551 *
4552 * Input:
4553 * ha = adapter block pointer.
4554 *
4555 * Returns:
4556 * 0 = success.
4557 * BIT_0 = error
4558 */
4559static int
e315cd28 4560qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 4561{
b3b02e6e 4562 int rval;
726b8548 4563 fc_port_t *fcport;
1da177e4 4564 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 4565 uint16_t loop_id;
1da177e4 4566 LIST_HEAD(new_fcports);
e315cd28 4567 struct qla_hw_data *ha = vha->hw;
df673274 4568 int discovery_gen;
1da177e4
LT
4569
4570 /* If FL port exists, then SNS is present */
e428924c 4571 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
4572 loop_id = NPH_F_PORT;
4573 else
4574 loop_id = SNS_FL_PORT;
e315cd28 4575 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 4576 if (rval != QLA_SUCCESS) {
83548fe2 4577 ql_dbg(ql_dbg_disc, vha, 0x20a0,
7c3df132 4578 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 4579
e315cd28 4580 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
4581 return (QLA_SUCCESS);
4582 }
e315cd28 4583 vha->device_flags |= SWITCH_FOUND;
1da177e4 4584
41dc529a
QT
4585
4586 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
4587 rval = qla2x00_send_change_request(vha, 0x3, 0);
4588 if (rval != QLA_SUCCESS)
4589 ql_log(ql_log_warn, vha, 0x121,
4590 "Failed to enable receiving of RSCN requests: 0x%x.\n",
4591 rval);
4592 }
4593
4594
1da177e4 4595 do {
726b8548
QT
4596 qla2x00_mgmt_svr_login(vha);
4597
cca5335c
AV
4598 /* FDMI support. */
4599 if (ql2xfdmienable &&
e315cd28
AC
4600 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
4601 qla2x00_fdmi_register(vha);
cca5335c 4602
1da177e4 4603 /* Ensure we are logged into the SNS. */
e428924c 4604 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
4605 loop_id = NPH_SNS;
4606 else
4607 loop_id = SIMPLE_NAME_SERVER;
0b91d116
CD
4608 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
4609 0xfc, mb, BIT_1|BIT_0);
4610 if (rval != QLA_SUCCESS) {
4611 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 4612 return rval;
0b91d116 4613 }
1da177e4 4614 if (mb[0] != MBS_COMMAND_COMPLETE) {
83548fe2 4615 ql_dbg(ql_dbg_disc, vha, 0x20a1,
7c3df132
SK
4616 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
4617 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
4618 mb[2], mb[6], mb[7]);
1da177e4
LT
4619 return (QLA_SUCCESS);
4620 }
4621
e315cd28
AC
4622 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
4623 if (qla2x00_rft_id(vha)) {
1da177e4 4624 /* EMPTY */
83548fe2 4625 ql_dbg(ql_dbg_disc, vha, 0x20a2,
7c3df132 4626 "Register FC-4 TYPE failed.\n");
b98ae0d7
QT
4627 if (test_bit(LOOP_RESYNC_NEEDED,
4628 &vha->dpc_flags))
4629 break;
1da177e4 4630 }
e315cd28 4631 if (qla2x00_rff_id(vha)) {
1da177e4 4632 /* EMPTY */
83548fe2 4633 ql_dbg(ql_dbg_disc, vha, 0x209a,
7c3df132 4634 "Register FC-4 Features failed.\n");
b98ae0d7
QT
4635 if (test_bit(LOOP_RESYNC_NEEDED,
4636 &vha->dpc_flags))
4637 break;
1da177e4 4638 }
e315cd28 4639 if (qla2x00_rnn_id(vha)) {
1da177e4 4640 /* EMPTY */
83548fe2 4641 ql_dbg(ql_dbg_disc, vha, 0x2104,
7c3df132 4642 "Register Node Name failed.\n");
b98ae0d7
QT
4643 if (test_bit(LOOP_RESYNC_NEEDED,
4644 &vha->dpc_flags))
4645 break;
e315cd28 4646 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 4647 /* EMPTY */
83548fe2 4648 ql_dbg(ql_dbg_disc, vha, 0x209b,
7c3df132 4649 				    "Register Symbolic Node Name failed.\n");
b98ae0d7
QT
4650 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4651 break;
1da177e4
LT
4652 }
4653 }
4654
827210ba
JC
4655 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4656 fcport->scan_state = QLA_FCPORT_SCAN;
4657 }
4658
df673274
AP
4659 /* Mark the time right before querying FW for connected ports.
4660 * This process is long, asynchronous and by the time it's done,
4661 * collected information might not be accurate anymore. E.g.
4662 * disconnected port might have re-connected and a brand new
4663 * session has been created. In this case session's generation
4664 * will be newer than discovery_gen. */
4665 qlt_do_generation_tick(vha, &discovery_gen);
4666
726b8548 4667 rval = qla2x00_find_all_fabric_devs(vha);
1da177e4
LT
4668 if (rval != QLA_SUCCESS)
4669 break;
1da177e4
LT
4670 } while (0);
4671
726b8548 4672 if (rval)
7c3df132
SK
4673 ql_dbg(ql_dbg_disc, vha, 0x2068,
4674 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
4675
4676 return (rval);
4677}
4678
1da177e4
LT
4679/*
4680 * qla2x00_find_all_fabric_devs
4681 *
4682 * Input:
4683 * ha = adapter block pointer.
4684 * dev = database device entry pointer.
4685 *
4686 * Returns:
4687 * 0 = success.
4688 *
4689 * Context:
4690 * Kernel context.
4691 */
4692static int
726b8548 4693qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1da177e4
LT
4694{
4695 int rval;
4696 uint16_t loop_id;
726b8548 4697 fc_port_t *fcport, *new_fcport;
1da177e4
LT
4698 int found;
4699
4700 sw_info_t *swl;
4701 int swl_idx;
4702 int first_dev, last_dev;
1516ef44 4703 port_id_t wrap = {}, nxt_d_id;
e315cd28 4704 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 4705 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
726b8548 4706 unsigned long flags;
1da177e4
LT
4707
4708 rval = QLA_SUCCESS;
4709
4710 /* Try GID_PT to get device list, else GAN. */
7a67735b 4711 if (!ha->swl)
642ef983 4712 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
4713 GFP_KERNEL);
4714 swl = ha->swl;
bbfbbbc1 4715 if (!swl) {
1da177e4 4716 /*EMPTY*/
83548fe2 4717 ql_dbg(ql_dbg_disc, vha, 0x209c,
7c3df132 4718 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 4719 } else {
642ef983 4720 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 4721 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 4722 swl = NULL;
b98ae0d7
QT
4723 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4724 return rval;
e315cd28 4725 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 4726 swl = NULL;
b98ae0d7
QT
4727 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4728 return rval;
e315cd28 4729 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 4730 swl = NULL;
b98ae0d7
QT
4731 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4732 return rval;
726b8548
QT
4733 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
4734 swl = NULL;
b98ae0d7
QT
4735 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4736 return rval;
1da177e4 4737 }
e8c72ba5
CD
4738
4739 /* If other queries succeeded probe for FC-4 type */
b98ae0d7 4740 if (swl) {
e8c72ba5 4741 qla2x00_gff_id(vha, swl);
b98ae0d7
QT
4742 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4743 return rval;
4744 }
1da177e4
LT
4745 }
4746 swl_idx = 0;
4747
4748 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4749 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4750 if (new_fcport == NULL) {
83548fe2 4751 ql_log(ql_log_warn, vha, 0x209d,
7c3df132 4752 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4753 return (QLA_MEMORY_ALLOC_FAILED);
4754 }
4755 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
4756 /* Set start port ID scan at adapter ID. */
4757 first_dev = 1;
4758 last_dev = 0;
4759
4760 /* Starting free loop ID. */
e315cd28
AC
4761 loop_id = ha->min_external_loopid;
4762 for (; loop_id <= ha->max_loop_id; loop_id++) {
4763 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
4764 continue;
4765
3a6478df
GM
4766 if (ha->current_topology == ISP_CFG_FL &&
4767 (atomic_read(&vha->loop_down_timer) ||
4768 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
4769 atomic_set(&vha->loop_down_timer, 0);
4770 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4771 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 4772 break;
bb2d52b2 4773 }
1da177e4
LT
4774
4775 if (swl != NULL) {
4776 if (last_dev) {
4777 wrap.b24 = new_fcport->d_id.b24;
4778 } else {
4779 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
4780 memcpy(new_fcport->node_name,
4781 swl[swl_idx].node_name, WWN_SIZE);
4782 memcpy(new_fcport->port_name,
4783 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
4784 memcpy(new_fcport->fabric_port_name,
4785 swl[swl_idx].fabric_port_name, WWN_SIZE);
4786 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 4787 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4 4788
a5d42f4c
DG
4789 new_fcport->nvme_flag = 0;
4790 if (vha->flags.nvme_enabled &&
4791 swl[swl_idx].fc4f_nvme) {
4792 new_fcport->fc4f_nvme =
4793 swl[swl_idx].fc4f_nvme;
4794 ql_log(ql_log_info, vha, 0x2131,
4795 "FOUND: NVME port %8phC as FC Type 28h\n",
4796 new_fcport->port_name);
4797 }
4798
1da177e4
LT
4799 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
4800 last_dev = 1;
4801 }
4802 swl_idx++;
4803 }
4804 } else {
4805 /* Send GA_NXT to the switch */
e315cd28 4806 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 4807 if (rval != QLA_SUCCESS) {
83548fe2 4808 ql_log(ql_log_warn, vha, 0x209e,
7c3df132
SK
4809 "SNS scan failed -- assuming "
4810 "zero-entry result.\n");
1da177e4
LT
4811 rval = QLA_SUCCESS;
4812 break;
4813 }
4814 }
4815
4816 /* If wrap on switch device list, exit. */
4817 if (first_dev) {
4818 wrap.b24 = new_fcport->d_id.b24;
4819 first_dev = 0;
4820 } else if (new_fcport->d_id.b24 == wrap.b24) {
83548fe2 4821 ql_dbg(ql_dbg_disc, vha, 0x209f,
7c3df132
SK
4822 "Device wrap (%02x%02x%02x).\n",
4823 new_fcport->d_id.b.domain,
4824 new_fcport->d_id.b.area,
4825 new_fcport->d_id.b.al_pa);
1da177e4
LT
4826 break;
4827 }
4828
2c3dfe3f 4829 /* Bypass if same physical adapter. */
e315cd28 4830 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
4831 continue;
4832
2c3dfe3f 4833 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
4834 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
4835 continue;
2c3dfe3f 4836
f7d289f6
AV
4837 /* Bypass if same domain and area of adapter. */
4838 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 4839 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
4840 ISP_CFG_FL)
4841 continue;
4842
1da177e4
LT
4843 /* Bypass reserved domain fields. */
4844 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
4845 continue;
4846
e8c72ba5 4847 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
4da26e16
CD
4848 if (ql2xgffidenable &&
4849 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
4850 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
4851 continue;
4852
726b8548
QT
4853 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4854
1da177e4
LT
4855 /* Locate matching device in database. */
4856 found = 0;
e315cd28 4857 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4858 if (memcmp(new_fcport->port_name, fcport->port_name,
4859 WWN_SIZE))
4860 continue;
4861
827210ba 4862 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 4863
1da177e4
LT
4864 found++;
4865
d8b45213
AV
4866 /* Update port state. */
4867 memcpy(fcport->fabric_port_name,
4868 new_fcport->fabric_port_name, WWN_SIZE);
4869 fcport->fp_speed = new_fcport->fp_speed;
4870
1da177e4 4871 /*
b2032fd5
RD
4872 * If address the same and state FCS_ONLINE
4873 * (or in target mode), nothing changed.
1da177e4
LT
4874 */
4875 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
b2032fd5 4876 (atomic_read(&fcport->state) == FCS_ONLINE ||
726b8548 4877 (vha->host->active_mode == MODE_TARGET))) {
1da177e4
LT
4878 break;
4879 }
4880
4881 /*
4882 * If device was not a fabric device before.
4883 */
4884 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4885 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 4886 qla2x00_clear_loop_id(fcport);
1da177e4
LT
4887 fcport->flags |= (FCF_FABRIC_DEVICE |
4888 FCF_LOGIN_NEEDED);
1da177e4
LT
4889 break;
4890 }
4891
4892 /*
4893 * Port ID changed or device was marked to be updated;
4894 * Log it out if still logged in and mark it for
4895 * relogin later.
4896 */
726b8548 4897 if (qla_tgt_mode_enabled(base_vha)) {
b2032fd5
RD
4898 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
4899 "port changed FC ID, %8phC"
4900 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
4901 fcport->port_name,
4902 fcport->d_id.b.domain,
4903 fcport->d_id.b.area,
4904 fcport->d_id.b.al_pa,
4905 fcport->loop_id,
4906 new_fcport->d_id.b.domain,
4907 new_fcport->d_id.b.area,
4908 new_fcport->d_id.b.al_pa);
4909 fcport->d_id.b24 = new_fcport->d_id.b24;
4910 break;
4911 }
4912
1da177e4
LT
4913 fcport->d_id.b24 = new_fcport->d_id.b24;
4914 fcport->flags |= FCF_LOGIN_NEEDED;
1da177e4
LT
4915 break;
4916 }
4917
726b8548
QT
4918 if (found) {
4919 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1da177e4 4920 continue;
726b8548 4921 }
1da177e4 4922 /* If device was not in our fcports list, then add it. */
b2032fd5 4923 new_fcport->scan_state = QLA_FCPORT_FOUND;
726b8548
QT
4924 list_add_tail(&new_fcport->list, &vha->vp_fcports);
4925
4926 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4927
1da177e4
LT
4928
4929 /* Allocate a new replacement fcport. */
4930 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 4931 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4932 if (new_fcport == NULL) {
83548fe2 4933 ql_log(ql_log_warn, vha, 0xd032,
7c3df132 4934 "Memory allocation failed for fcport.\n");
1da177e4
LT
4935 return (QLA_MEMORY_ALLOC_FAILED);
4936 }
4937 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
4938 new_fcport->d_id.b24 = nxt_d_id.b24;
4939 }
4940
726b8548
QT
4941 qla2x00_free_fcport(new_fcport);
4942
4943 /*
4944 * Logout all previous fabric dev marked lost, except FCP2 devices.
4945 */
4946 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4947 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4948 break;
4949
4950 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
4951 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
4952 continue;
4953
4954 if (fcport->scan_state == QLA_FCPORT_SCAN) {
4955 if ((qla_dual_mode_enabled(vha) ||
4956 qla_ini_mode_enabled(vha)) &&
4957 atomic_read(&fcport->state) == FCS_ONLINE) {
4958 qla2x00_mark_device_lost(vha, fcport,
4959 ql2xplogiabsentdevice, 0);
4960 if (fcport->loop_id != FC_NO_LOOP_ID &&
4961 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
4962 fcport->port_type != FCT_INITIATOR &&
4963 fcport->port_type != FCT_BROADCAST) {
83548fe2 4964 ql_dbg(ql_dbg_disc, vha, 0x20f0,
726b8548
QT
4965 "%s %d %8phC post del sess\n",
4966 __func__, __LINE__,
4967 fcport->port_name);
4968
4969 qlt_schedule_sess_for_deletion_lock
4970 (fcport);
4971 continue;
4972 }
4973 }
4974 }
1da177e4 4975
726b8548
QT
4976 if (fcport->scan_state == QLA_FCPORT_FOUND)
4977 qla24xx_fcport_handle_login(vha, fcport);
4978 }
1da177e4
LT
4979 return (rval);
4980}
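/*
 * Editor's note: when the GID_PT/GPN_ID results are unavailable, the fabric
 * scan above falls back to GA_NXT, asking the name server for "the next
 * port" one entry at a time; the first port ID returned is remembered and
 * the walk stops when that ID comes around again (the "Device wrap" debug
 * message).  A standalone sketch of that wrap detection follows; next_port()
 * is a hypothetical query callback, not a driver call.
 */
#include <stdbool.h>
#include <stdint.h>

static unsigned int walk_nameserver(uint32_t (*next_port)(uint32_t after),
				    uint32_t start)
{
	unsigned int count = 0;
	bool first = true;
	uint32_t wrap = 0, cur = start;

	for (;;) {
		cur = next_port(cur);    /* GA_NXT-style "give me the next" */

		if (first) {
			wrap = cur;      /* remember where the walk began */
			first = false;
		} else if (cur == wrap) {
			break;           /* came back around: scan complete */
		}
		count++;
	}
	return count;
}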
4981
4982/*
4983 * qla2x00_find_new_loop_id
4984 * Scan through our port list and find a new usable loop ID.
4985 *
4986 * Input:
4987 * ha: adapter state pointer.
4988 * dev: port structure pointer.
4989 *
4990 * Returns:
4991 * qla2x00 local function return status code.
4992 *
4993 * Context:
4994 * Kernel context.
4995 */
03bcfb57 4996int
e315cd28 4997qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
1da177e4
LT
4998{
4999 int rval;
e315cd28 5000 struct qla_hw_data *ha = vha->hw;
feafb7b1 5001 unsigned long flags = 0;
1da177e4
LT
5002
5003 rval = QLA_SUCCESS;
5004
5f16b331 5005 spin_lock_irqsave(&ha->vport_slock, flags);
1da177e4 5006
5f16b331
CD
5007 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
5008 LOOPID_MAP_SIZE);
5009 if (dev->loop_id >= LOOPID_MAP_SIZE ||
5010 qla2x00_is_reserved_id(vha, dev->loop_id)) {
5011 dev->loop_id = FC_NO_LOOP_ID;
5012 rval = QLA_FUNCTION_FAILED;
5013 } else
5014 set_bit(dev->loop_id, ha->loop_id_map);
1da177e4 5015
5f16b331 5016 spin_unlock_irqrestore(&ha->vport_slock, flags);
1da177e4 5017
5f16b331
CD
5018 if (rval == QLA_SUCCESS)
5019 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5020 "Assigning new loopid=%x, portid=%x.\n",
5021 dev->loop_id, dev->d_id.b24);
5022 else
5023 ql_log(ql_log_warn, dev->vha, 0x2087,
5024 "No loop_id's available, portid=%x.\n",
5025 dev->d_id.b24);
1da177e4
LT
5026
5027 return (rval);
5028}
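/*
 * Editor's note: qla2x00_find_new_loop_id() above hands out loop IDs from a
 * bitmap guarded by ha->vport_slock -- find the first clear bit, reject
 * reserved IDs, then mark the bit used.  The standalone sketch below shows
 * the same idea with a plain byte-array bitmap and a caller-supplied
 * "reserved" predicate; it does no locking, and unlike the driver (which
 * simply fails if the first free slot is reserved) it keeps searching past
 * reserved IDs.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ID_INVALID ((size_t)-1)

static size_t alloc_id(uint8_t *map, size_t nbits,
		       bool (*is_reserved)(size_t id))
{
	size_t id;

	for (id = 0; id < nbits; id++) {
		if (map[id / 8] & (1u << (id % 8)))
			continue;                 /* already in use */
		if (is_reserved(id))
			continue;                 /* e.g. FL port, SNS, broadcast */
		map[id / 8] |= 1u << (id % 8);    /* claim it */
		return id;
	}
	return ID_INVALID;                        /* no loop IDs available */
}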
5029
1da177e4
LT
5030
5031/*
5032 * qla2x00_fabric_login
5033 * Issue fabric login command.
5034 *
5035 * Input:
5036 * ha = adapter block pointer.
5037 * device = pointer to FC device type structure.
5038 *
5039 * Returns:
5040 * 0 - Login successfully
5041 * 1 - Login failed
5042 * 2 - Initiator device
5043 * 3 - Fatal error
5044 */
5045int
e315cd28 5046qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
5047 uint16_t *next_loopid)
5048{
5049 int rval;
5050 int retry;
5051 uint16_t tmp_loopid;
5052 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 5053 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
5054
5055 retry = 0;
5056 tmp_loopid = 0;
5057
5058 for (;;) {
7c3df132
SK
5059 ql_dbg(ql_dbg_disc, vha, 0x2000,
5060 "Trying Fabric Login w/loop id 0x%04x for port "
5061 "%02x%02x%02x.\n",
5062 fcport->loop_id, fcport->d_id.b.domain,
5063 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
5064
5065 /* Login fcport on switch. */
0b91d116 5066 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
5067 fcport->d_id.b.domain, fcport->d_id.b.area,
5068 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
5069 if (rval != QLA_SUCCESS) {
5070 return rval;
5071 }
1da177e4
LT
5072 if (mb[0] == MBS_PORT_ID_USED) {
5073 /*
5074 * Device has another loop ID. The firmware team
0107109e
AV
5075 * recommends the driver perform an implicit login with
 5076 			 * the specified ID again. The ID we just used is saved
5077 * here so we return with an ID that can be tried by
5078 * the next login.
1da177e4
LT
5079 */
5080 retry++;
5081 tmp_loopid = fcport->loop_id;
5082 fcport->loop_id = mb[1];
5083
7c3df132
SK
5084 ql_dbg(ql_dbg_disc, vha, 0x2001,
5085 "Fabric Login: port in use - next loop "
5086 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 5087 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 5088 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
5089
5090 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
5091 /*
5092 * Login succeeded.
5093 */
5094 if (retry) {
5095 /* A retry occurred before. */
5096 *next_loopid = tmp_loopid;
5097 } else {
5098 /*
5099 * No retry occurred before. Just increment the
5100 * ID value for next login.
5101 */
5102 *next_loopid = (fcport->loop_id + 1);
5103 }
5104
5105 if (mb[1] & BIT_0) {
5106 fcport->port_type = FCT_INITIATOR;
5107 } else {
5108 fcport->port_type = FCT_TARGET;
5109 if (mb[1] & BIT_1) {
8474f3a0 5110 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
5111 }
5112 }
5113
ad3e0eda
AV
5114 if (mb[10] & BIT_0)
5115 fcport->supported_classes |= FC_COS_CLASS2;
5116 if (mb[10] & BIT_1)
5117 fcport->supported_classes |= FC_COS_CLASS3;
5118
2d70c103
NB
5119 if (IS_FWI2_CAPABLE(ha)) {
5120 if (mb[10] & BIT_7)
5121 fcport->flags |=
5122 FCF_CONF_COMP_SUPPORTED;
5123 }
5124
1da177e4
LT
5125 rval = QLA_SUCCESS;
5126 break;
5127 } else if (mb[0] == MBS_LOOP_ID_USED) {
5128 /*
5129 * Loop ID already used, try next loop ID.
5130 */
5131 fcport->loop_id++;
e315cd28 5132 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
5133 if (rval != QLA_SUCCESS) {
5134 /* Ran out of loop IDs to use */
5135 break;
5136 }
5137 } else if (mb[0] == MBS_COMMAND_ERROR) {
5138 /*
5139 * Firmware possibly timed out during login. If NO
5140 * retries are left to do then the device is declared
5141 * dead.
5142 */
5143 *next_loopid = fcport->loop_id;
e315cd28 5144 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5145 fcport->d_id.b.domain, fcport->d_id.b.area,
5146 fcport->d_id.b.al_pa);
e315cd28 5147 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
5148
5149 rval = 1;
5150 break;
5151 } else {
5152 /*
5153 * unrecoverable / not handled error
5154 */
7c3df132
SK
5155 ql_dbg(ql_dbg_disc, vha, 0x2002,
5156 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
5157 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
5158 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5159 fcport->loop_id, jiffies);
1da177e4
LT
5160
5161 *next_loopid = fcport->loop_id;
e315cd28 5162 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5163 fcport->d_id.b.domain, fcport->d_id.b.area,
5164 fcport->d_id.b.al_pa);
5f16b331 5165 qla2x00_clear_loop_id(fcport);
0eedfcf0 5166 fcport->login_retry = 0;
1da177e4
LT
5167
5168 rval = 3;
5169 break;
5170 }
5171 }
5172
5173 return (rval);
5174}
5175
5176/*
5177 * qla2x00_local_device_login
5178 * Issue local device login command.
5179 *
5180 * Input:
5181 * ha = adapter block pointer.
5182 * loop_id = loop id of device to login to.
5183 *
5184 * Returns (Where's the #define!!!!):
5185 * 0 - Login successfully
5186 * 1 - Login failed
5187 * 3 - Fatal error
5188 */
5189int
e315cd28 5190qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
5191{
5192 int rval;
5193 uint16_t mb[MAILBOX_REGISTER_COUNT];
5194
5195 memset(mb, 0, sizeof(mb));
e315cd28 5196 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
5197 if (rval == QLA_SUCCESS) {
5198 /* Interrogate mailbox registers for any errors */
5199 if (mb[0] == MBS_COMMAND_ERROR)
5200 rval = 1;
5201 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5202 /* device not in PCB table */
5203 rval = 3;
5204 }
5205
5206 return (rval);
5207}
5208
5209/*
5210 * qla2x00_loop_resync
5211 * Resync with fibre channel devices.
5212 *
5213 * Input:
5214 * ha = adapter block pointer.
5215 *
5216 * Returns:
5217 * 0 = success
5218 */
5219int
e315cd28 5220qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 5221{
73208dfd 5222 int rval = QLA_SUCCESS;
1da177e4 5223 uint32_t wait_time;
67c2e93a
AC
5224 struct req_que *req;
5225 struct rsp_que *rsp;
5226
d7459527 5227 req = vha->req;
67c2e93a 5228 rsp = req->rsp;
1da177e4 5229
e315cd28
AC
5230 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5231 if (vha->flags.online) {
5232 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
5233 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5234 wait_time = 256;
5235 do {
8ae6d9c7
GM
5236 if (!IS_QLAFX00(vha->hw)) {
5237 /*
5238 * Issue a marker after FW becomes
5239 * ready.
5240 */
5241 qla2x00_marker(vha, req, rsp, 0, 0,
5242 MK_SYNC_ALL);
5243 vha->marker_needed = 0;
5244 }
1da177e4
LT
5245
5246 /* Remap devices on Loop. */
e315cd28 5247 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 5248
8ae6d9c7
GM
5249 if (IS_QLAFX00(vha->hw))
5250 qlafx00_configure_devices(vha);
5251 else
5252 qla2x00_configure_loop(vha);
5253
1da177e4 5254 wait_time--;
e315cd28
AC
5255 } while (!atomic_read(&vha->loop_down_timer) &&
5256 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5257 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5258 &vha->dpc_flags)));
1da177e4 5259 }
1da177e4
LT
5260 }
5261
e315cd28 5262 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 5263 return (QLA_FUNCTION_FAILED);
1da177e4 5264
e315cd28 5265 if (rval)
7c3df132
SK
5266 ql_dbg(ql_dbg_disc, vha, 0x206c,
5267 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
5268
5269 return (rval);
5270}
5271
579d12b5
SK
5272/*
5273* qla2x00_perform_loop_resync
5274* Description: This function will set the appropriate flags and call
 5275* 		qla2x00_loop_resync. If successful, the loop will be resynced
5276* Arguments : scsi_qla_host_t pointer
 5277* return : Success or Failure
5278*/
5279
5280int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5281{
5282 int32_t rval = 0;
5283
5284 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5285 /* Configure the flags so that resync happens properly */
5286 atomic_set(&ha->loop_down_timer, 0);
5287 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5288 atomic_set(&ha->loop_state, LOOP_UP);
5289 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5290 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5291 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5292
5293 rval = qla2x00_loop_resync(ha);
5294 } else
5295 atomic_set(&ha->loop_state, LOOP_DEAD);
5296
5297 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5298 }
5299
5300 return rval;
5301}
5302
d97994dc 5303void
67becc00 5304qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc 5305{
5306 fc_port_t *fcport;
feafb7b1
AE
5307 struct scsi_qla_host *vha;
5308 struct qla_hw_data *ha = base_vha->hw;
5309 unsigned long flags;
d97994dc 5310
feafb7b1 5311 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 5312 /* Go with deferred removal of rport references. */
feafb7b1
AE
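 /* Each vport is pinned with vref_count while vport_slock is dropped
 * around qla2x00_rport_del(); vref_waitq is woken afterwards so a
 * pending vport delete can proceed once the reference is released.
 */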
5313 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
5314 atomic_inc(&vha->vref_count);
5315 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 5316 if (fcport->drport &&
feafb7b1
AE
5317 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
5318 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 5319 qla2x00_rport_del(fcport);
df673274 5320
feafb7b1
AE
5321 spin_lock_irqsave(&ha->vport_slock, flags);
5322 }
5323 }
5324 atomic_dec(&vha->vref_count);
c4a9b538 5325 wake_up(&vha->vref_waitq);
feafb7b1
AE
5326 }
5327 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc 5328}
5329
7d613ac6
SV
5330/* Assumes idc_lock always held on entry */
5331void
5332qla83xx_reset_ownership(scsi_qla_host_t *vha)
5333{
5334 struct qla_hw_data *ha = vha->hw;
5335 uint32_t drv_presence, drv_presence_mask;
5336 uint32_t dev_part_info1, dev_part_info2, class_type;
5337 uint32_t class_type_mask = 0x3;
5338 uint16_t fcoe_other_function = 0xffff, i;
5339
7ec0effd
AD
5340 if (IS_QLA8044(ha)) {
5341 drv_presence = qla8044_rd_direct(vha,
5342 QLA8044_CRB_DRV_ACTIVE_INDEX);
5343 dev_part_info1 = qla8044_rd_direct(vha,
5344 QLA8044_CRB_DEV_PART_INFO_INDEX);
5345 dev_part_info2 = qla8044_rd_direct(vha,
5346 QLA8044_CRB_DEV_PART_INFO2);
5347 } else {
5348 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5349 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5350 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
5351 }
7d613ac6
SV
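 /*
 * DEV_PART_INFO1/2 carry one 4-bit descriptor per function (eight per
 * register); the low two bits of each descriptor give the protocol
 * class. Scan info1 for functions 0-7 and info2 for functions 8-15 to
 * find another FCoE-class function.
 */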
5352 for (i = 0; i < 8; i++) {
5353 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5354 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5355 (i != ha->portnum)) {
5356 fcoe_other_function = i;
5357 break;
5358 }
5359 }
5360 if (fcoe_other_function == 0xffff) {
5361 for (i = 0; i < 8; i++) {
5362 class_type = ((dev_part_info2 >> (i * 4)) &
5363 class_type_mask);
5364 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5365 ((i + 8) != ha->portnum)) {
5366 fcoe_other_function = i + 8;
5367 break;
5368 }
5369 }
5370 }
5371 /*
5372 * Prepare drv-presence mask based on fcoe functions present.
5373 * However consider only valid physical fcoe function numbers (0-15).
5374 */
5375 drv_presence_mask = ~((1 << (ha->portnum)) |
5376 ((fcoe_other_function == 0xffff) ?
5377 0 : (1 << (fcoe_other_function))));
5378
5379 /* We are the reset owner iff:
5380 * - No other protocol drivers present.
5381 * - This function has the lowest number among the FCoE functions. */
5382 if (!(drv_presence & drv_presence_mask) &&
5383 (ha->portnum < fcoe_other_function)) {
5384 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
5385 "This host is Reset owner.\n");
5386 ha->flags.nic_core_reset_owner = 1;
5387 }
5388}
5389
fa492630 5390static int
7d613ac6
SV
5391__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5392{
5393 int rval = QLA_SUCCESS;
5394 struct qla_hw_data *ha = vha->hw;
5395 uint32_t drv_ack;
5396
5397 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5398 if (rval == QLA_SUCCESS) {
5399 drv_ack |= (1 << ha->portnum);
5400 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5401 }
5402
5403 return rval;
5404}
5405
fa492630 5406static int
7d613ac6
SV
5407__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
5408{
5409 int rval = QLA_SUCCESS;
5410 struct qla_hw_data *ha = vha->hw;
5411 uint32_t drv_ack;
5412
5413 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5414 if (rval == QLA_SUCCESS) {
5415 drv_ack &= ~(1 << ha->portnum);
5416 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5417 }
5418
5419 return rval;
5420}
5421
fa492630 5422static const char *
7d613ac6
SV
5423qla83xx_dev_state_to_string(uint32_t dev_state)
5424{
5425 switch (dev_state) {
5426 case QLA8XXX_DEV_COLD:
5427 return "COLD/RE-INIT";
5428 case QLA8XXX_DEV_INITIALIZING:
5429 return "INITIALIZING";
5430 case QLA8XXX_DEV_READY:
5431 return "READY";
5432 case QLA8XXX_DEV_NEED_RESET:
5433 return "NEED RESET";
5434 case QLA8XXX_DEV_NEED_QUIESCENT:
5435 return "NEED QUIESCENT";
5436 case QLA8XXX_DEV_FAILED:
5437 return "FAILED";
5438 case QLA8XXX_DEV_QUIESCENT:
5439 return "QUIESCENT";
5440 default:
5441 return "Unknown";
5442 }
5443}
5444
5445/* Assumes idc-lock always held on entry */
5446void
5447qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
5448{
5449 struct qla_hw_data *ha = vha->hw;
5450 uint32_t idc_audit_reg = 0, duration_secs = 0;
5451
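 /*
 * The IDC_AUDIT register packs the function number in bits [6:0],
 * the audit type in bit 7, and a timestamp or duration (in seconds)
 * in bits [31:8].
 */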
5452 switch (audit_type) {
5453 case IDC_AUDIT_TIMESTAMP:
5454 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
5455 idc_audit_reg = (ha->portnum) |
5456 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
5457 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5458 break;
5459
5460 case IDC_AUDIT_COMPLETION:
5461 duration_secs = ((jiffies_to_msecs(jiffies) -
5462 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
5463 idc_audit_reg = (ha->portnum) |
5464 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
5465 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5466 break;
5467
5468 default:
5469 ql_log(ql_log_warn, vha, 0xb078,
5470 "Invalid audit type specified.\n");
5471 break;
5472 }
5473}
5474
5475/* Assumes idc_lock always held on entry */
fa492630 5476static int
7d613ac6
SV
5477qla83xx_initiating_reset(scsi_qla_host_t *vha)
5478{
5479 struct qla_hw_data *ha = vha->hw;
5480 uint32_t idc_control, dev_state;
5481
5482 __qla83xx_get_idc_control(vha, &idc_control);
5483 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
5484 ql_log(ql_log_info, vha, 0xb080,
5485 "NIC Core reset has been disabled. idc-control=0x%x\n",
5486 idc_control);
5487 return QLA_FUNCTION_FAILED;
5488 }
5489
5490 /* Set NEED-RESET iff in READY state and we are the reset-owner */
5491 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5492 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
5493 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
5494 QLA8XXX_DEV_NEED_RESET);
5495 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
5496 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
5497 } else {
5498 const char *state = qla83xx_dev_state_to_string(dev_state);
5499 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
5500
5501 /* SV: XXX: Is timeout required here? */
5502 /* Wait for IDC state change READY -> NEED_RESET */
5503 while (dev_state == QLA8XXX_DEV_READY) {
5504 qla83xx_idc_unlock(vha, 0);
5505 msleep(200);
5506 qla83xx_idc_lock(vha, 0);
5507 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5508 }
5509 }
5510
5511 /* Send IDC ack by writing to drv-ack register */
5512 __qla83xx_set_drv_ack(vha);
5513
5514 return QLA_SUCCESS;
5515}
5516
5517int
5518__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
5519{
5520 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
5521}
5522
7d613ac6
SV
5523int
5524__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
5525{
5526 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
5527}
5528
fa492630 5529static int
7d613ac6
SV
5530qla83xx_check_driver_presence(scsi_qla_host_t *vha)
5531{
5532 uint32_t drv_presence = 0;
5533 struct qla_hw_data *ha = vha->hw;
5534
5535 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5536 if (drv_presence & (1 << ha->portnum))
5537 return QLA_SUCCESS;
5538 else
5539 return QLA_TEST_FAILED;
5540}
5541
5542int
5543qla83xx_nic_core_reset(scsi_qla_host_t *vha)
5544{
5545 int rval = QLA_SUCCESS;
5546 struct qla_hw_data *ha = vha->hw;
5547
5548 ql_dbg(ql_dbg_p3p, vha, 0xb058,
5549 "Entered %s().\n", __func__);
5550
5551 if (vha->device_flags & DFLG_DEV_FAILED) {
5552 ql_log(ql_log_warn, vha, 0xb059,
5553 "Device in unrecoverable FAILED state.\n");
5554 return QLA_FUNCTION_FAILED;
5555 }
5556
5557 qla83xx_idc_lock(vha, 0);
5558
5559 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
5560 ql_log(ql_log_warn, vha, 0xb05a,
5561 "Function=0x%x has been removed from IDC participation.\n",
5562 ha->portnum);
5563 rval = QLA_FUNCTION_FAILED;
5564 goto exit;
5565 }
5566
5567 qla83xx_reset_ownership(vha);
5568
5569 rval = qla83xx_initiating_reset(vha);
5570
5571 /*
5572 * Perform reset if we are the reset-owner,
5573 * else wait till IDC state changes to READY/FAILED.
5574 */
5575 if (rval == QLA_SUCCESS) {
5576 rval = qla83xx_idc_state_handler(vha);
5577
5578 if (rval == QLA_SUCCESS)
5579 ha->flags.nic_core_hung = 0;
5580 __qla83xx_clear_drv_ack(vha);
5581 }
5582
5583exit:
5584 qla83xx_idc_unlock(vha, 0);
5585
5586 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
5587
5588 return rval;
5589}
5590
81178772
SK
5591int
5592qla2xxx_mctp_dump(scsi_qla_host_t *vha)
5593{
5594 struct qla_hw_data *ha = vha->hw;
5595 int rval = QLA_FUNCTION_FAILED;
5596
5597 if (!IS_MCTP_CAPABLE(ha)) {
5598 /* This message can be removed from the final version */
5599 ql_log(ql_log_info, vha, 0x506d,
5600 "This board is not MCTP capable\n");
5601 return rval;
5602 }
5603
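 /* Lazily allocate a DMA-coherent buffer for the MCTP dump on first
 * use; the same buffer is reused for subsequent captures.
 */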
5604 if (!ha->mctp_dump) {
5605 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
5606 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
5607
5608 if (!ha->mctp_dump) {
5609 ql_log(ql_log_warn, vha, 0x506e,
5610 "Failed to allocate memory for mctp dump\n");
5611 return rval;
5612 }
5613 }
5614
5615#define MCTP_DUMP_STR_ADDR 0x00000000
5616 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
5617 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
5618 if (rval != QLA_SUCCESS) {
5619 ql_log(ql_log_warn, vha, 0x506f,
5620 "Failed to capture mctp dump\n");
5621 } else {
5622 ql_log(ql_log_info, vha, 0x5070,
5623 "Mctp dump capture for host (%ld/%p).\n",
5624 vha->host_no, ha->mctp_dump);
5625 ha->mctp_dumped = 1;
5626 }
5627
409ee0fe 5628 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
5629 ha->flags.nic_core_reset_hdlr_active = 1;
5630 rval = qla83xx_restart_nic_firmware(vha);
5631 if (rval)
5632 /* NIC Core reset failed. */
5633 ql_log(ql_log_warn, vha, 0x5071,
5634 "Failed to restart nic firmware\n");
5635 else
5636 ql_dbg(ql_dbg_p3p, vha, 0xb084,
5637 "Restarted NIC firmware successfully.\n");
5638 ha->flags.nic_core_reset_hdlr_active = 0;
5639 }
5640
5641 return rval;
5642
5643}
5644
579d12b5 5645/*
8fcd6b8b 5646* qla2x00_quiesce_io
579d12b5
SK
5647* Description: This function will block new I/Os.
5648* It does not abort any I/Os, as the context
5649* is not destroyed during quiescence
5650* Arguments: scsi_qla_host_t
5651* return : void
5652*/
5653void
8fcd6b8b 5654qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
5655{
5656 struct qla_hw_data *ha = vha->hw;
5657 struct scsi_qla_host *vp;
5658
8fcd6b8b
CD
5659 ql_dbg(ql_dbg_dpc, vha, 0x401d,
5660 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
5661
5662 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
5663 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5664 atomic_set(&vha->loop_state, LOOP_DOWN);
5665 qla2x00_mark_all_devices_lost(vha, 0);
5666 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 5667 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
5668 } else {
5669 if (!atomic_read(&vha->loop_down_timer))
5670 atomic_set(&vha->loop_down_timer,
5671 LOOP_DOWN_TIME);
5672 }
5673 /* Wait for pending cmds to complete */
5674 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
5675}
5676
a9083016
GM
5677void
5678qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
5679{
5680 struct qla_hw_data *ha = vha->hw;
579d12b5 5681 struct scsi_qla_host *vp;
feafb7b1 5682 unsigned long flags;
6aef87be 5683 fc_port_t *fcport;
7c3f8fd1 5684 u16 i;
a9083016 5685
e46ef004
SK
5686 /* For ISP82XX, the driver waits for the commands to complete,
5687 * so the online flag should remain set.
5688 */
7ec0effd 5689 if (!(IS_P3P_TYPE(ha)))
e46ef004 5690 vha->flags.online = 0;
a9083016
GM
5691 ha->flags.chip_reset_done = 0;
5692 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 5693 vha->qla_stats.total_isp_aborts++;
a9083016 5694
7c3df132
SK
5695 ql_log(ql_log_info, vha, 0x00af,
5696 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 5697
e46ef004
SK
5698 /* For ISP82XX, reset_chip only disables interrupts.
5699 * Because the driver waits for the commands to complete,
5700 * the interrupts need to stay enabled.
5701 */
7ec0effd 5702 if (!(IS_P3P_TYPE(ha)))
a9083016
GM
5703 ha->isp_ops->reset_chip(vha);
5704
ec7193e2
QT
5705 ha->flags.n2n_ae = 0;
5706 ha->flags.lip_ae = 0;
5707 ha->current_topology = 0;
5708 ha->flags.fw_started = 0;
5709 ha->flags.fw_init_done = 0;
7c3f8fd1
QT
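 /* Bump the chip-reset generation counter and mirror it into every
 * active queue pair so they all agree on the new reset generation.
 */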
5710 ha->base_qpair->chip_reset++;
5711 for (i = 0; i < ha->max_qpairs; i++) {
5712 if (ha->queue_pair_map[i])
5713 ha->queue_pair_map[i]->chip_reset =
5714 ha->base_qpair->chip_reset;
5715 }
726b8548 5716
a9083016
GM
5717 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5718 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5719 atomic_set(&vha->loop_state, LOOP_DOWN);
5720 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
5721
5722 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 5723 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
5724 atomic_inc(&vp->vref_count);
5725 spin_unlock_irqrestore(&ha->vport_slock, flags);
5726
a9083016 5727 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
5728
5729 spin_lock_irqsave(&ha->vport_slock, flags);
5730 atomic_dec(&vp->vref_count);
5731 }
5732 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
5733 } else {
5734 if (!atomic_read(&vha->loop_down_timer))
5735 atomic_set(&vha->loop_down_timer,
5736 LOOP_DOWN_TIME);
5737 }
5738
6aef87be
AV
5739 /* Clear all async request states across all VPs. */
5740 list_for_each_entry(fcport, &vha->vp_fcports, list)
5741 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5742 spin_lock_irqsave(&ha->vport_slock, flags);
5743 list_for_each_entry(vp, &ha->vp_list, list) {
5744 atomic_inc(&vp->vref_count);
5745 spin_unlock_irqrestore(&ha->vport_slock, flags);
5746
5747 list_for_each_entry(fcport, &vp->vp_fcports, list)
5748 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5749
5750 spin_lock_irqsave(&ha->vport_slock, flags);
5751 atomic_dec(&vp->vref_count);
5752 }
5753 spin_unlock_irqrestore(&ha->vport_slock, flags);
5754
bddd2d65
LC
5755 if (!ha->flags.eeh_busy) {
5756 /* Make sure for ISP 82XX IO DMA is complete */
7ec0effd 5757 if (IS_P3P_TYPE(ha)) {
7190575f 5758 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
5759 ql_log(ql_log_info, vha, 0x00b4,
5760 "Done chip reset cleanup.\n");
a9083016 5761
e46ef004
SK
5762 /* Done waiting for pending commands.
5763 * Reset the online flag.
5764 */
5765 vha->flags.online = 0;
4d78c973 5766 }
a9083016 5767
bddd2d65
LC
5768 /* Requeue all commands in outstanding command list. */
5769 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
5770 }
b6a029e1
AE
5771 /* memory barrier */
5772 wmb();
a9083016
GM
5773}
5774
1da177e4
LT
5775/*
5776* qla2x00_abort_isp
5777* Resets ISP and aborts all outstanding commands.
5778*
5779* Input:
5780* ha = adapter block pointer.
5781*
5782* Returns:
5783* 0 = success
5784*/
5785int
e315cd28 5786qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 5787{
476e8978 5788 int rval;
1da177e4 5789 uint8_t status = 0;
e315cd28
AC
5790 struct qla_hw_data *ha = vha->hw;
5791 struct scsi_qla_host *vp;
73208dfd 5792 struct req_que *req = ha->req_q_map[0];
feafb7b1 5793 unsigned long flags;
1da177e4 5794
e315cd28 5795 if (vha->flags.online) {
a9083016 5796 qla2x00_abort_isp_cleanup(vha);
1da177e4 5797
a6171297
SV
5798 if (IS_QLA8031(ha)) {
5799 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
5800 "Clearing fcoe driver presence.\n");
5801 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
5802 ql_dbg(ql_dbg_p3p, vha, 0xb073,
5803 "Error while clearing DRV-Presence.\n");
5804 }
5805
85880801
AV
5806 if (unlikely(pci_channel_offline(ha->pdev) &&
5807 ha->flags.pci_channel_io_perm_failure)) {
5808 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5809 status = 0;
5810 return status;
5811 }
5812
73208dfd 5813 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 5814
e315cd28 5815 ha->isp_ops->nvram_config(vha);
1da177e4 5816
e315cd28
AC
5817 if (!qla2x00_restart_isp(vha)) {
5818 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 5819
e315cd28 5820 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
5821 /*
5822 * Issue marker command only when we are going
5823 * to start the I/O.
5824 */
e315cd28 5825 vha->marker_needed = 1;
1da177e4
LT
5826 }
5827
e315cd28 5828 vha->flags.online = 1;
1da177e4 5829
fd34f556 5830 ha->isp_ops->enable_intrs(ha);
1da177e4 5831
fa2a1ce5 5832 ha->isp_abort_cnt = 0;
e315cd28 5833 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 5834
6246b8a1
GM
5835 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
5836 qla2x00_get_fw_version(vha);
df613b96
AV
5837 if (ha->fce) {
5838 ha->flags.fce_enabled = 1;
5839 memset(ha->fce, 0,
5840 fce_calc_size(ha->fce_bufs));
e315cd28 5841 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
5842 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5843 &ha->fce_bufs);
5844 if (rval) {
7c3df132 5845 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
5846 "Unable to reinitialize FCE "
5847 "(%d).\n", rval);
5848 ha->flags.fce_enabled = 0;
5849 }
5850 }
436a7b11
AV
5851
5852 if (ha->eft) {
5853 memset(ha->eft, 0, EFT_SIZE);
e315cd28 5854 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
5855 ha->eft_dma, EFT_NUM_BUFFERS);
5856 if (rval) {
7c3df132 5857 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
5858 "Unable to reinitialize EFT "
5859 "(%d).\n", rval);
5860 }
5861 }
1da177e4 5862 } else { /* failed the ISP abort */
e315cd28
AC
5863 vha->flags.online = 1;
5864 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 5865 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
5866 ql_log(ql_log_fatal, vha, 0x8035,
5867 "ISP error recovery failed - "
5868 "board disabled.\n");
fa2a1ce5 5869 /*
1da177e4
LT
5870 * The next call disables the board
5871 * completely.
5872 */
e315cd28
AC
5873 ha->isp_ops->reset_adapter(vha);
5874 vha->flags.online = 0;
1da177e4 5875 clear_bit(ISP_ABORT_RETRY,
e315cd28 5876 &vha->dpc_flags);
1da177e4
LT
5877 status = 0;
5878 } else { /* schedule another ISP abort */
5879 ha->isp_abort_cnt--;
7c3df132
SK
5880 ql_dbg(ql_dbg_taskm, vha, 0x8020,
5881 "ISP abort - retry remaining %d.\n",
5882 ha->isp_abort_cnt);
1da177e4
LT
5883 status = 1;
5884 }
5885 } else {
5886 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
5887 ql_dbg(ql_dbg_taskm, vha, 0x8021,
5888 "ISP error recovery - retrying (%d) "
5889 "more times.\n", ha->isp_abort_cnt);
e315cd28 5890 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
5891 status = 1;
5892 }
5893 }
fa2a1ce5 5894
1da177e4
LT
5895 }
5896
e315cd28 5897 if (!status) {
7c3df132 5898 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
feafb7b1
AE
5899
5900 spin_lock_irqsave(&ha->vport_slock, flags);
5901 list_for_each_entry(vp, &ha->vp_list, list) {
5902 if (vp->vp_idx) {
5903 atomic_inc(&vp->vref_count);
5904 spin_unlock_irqrestore(&ha->vport_slock, flags);
5905
e315cd28 5906 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
5907
5908 spin_lock_irqsave(&ha->vport_slock, flags);
5909 atomic_dec(&vp->vref_count);
5910 }
e315cd28 5911 }
feafb7b1
AE
5912 spin_unlock_irqrestore(&ha->vport_slock, flags);
5913
7d613ac6
SV
5914 if (IS_QLA8031(ha)) {
5915 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
5916 "Setting back fcoe driver presence.\n");
5917 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
5918 ql_dbg(ql_dbg_p3p, vha, 0xb074,
5919 "Error while setting DRV-Presence.\n");
5920 }
e315cd28 5921 } else {
d8424f68
JP
5922 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
5923 __func__);
1da177e4
LT
5924 }
5925
5926 return(status);
5927}
5928
5929/*
5930* qla2x00_restart_isp
5931* restarts the ISP after a reset
5932*
5933* Input:
5934* ha = adapter block pointer.
5935*
5936* Returns:
5937* 0 = success
5938*/
5939static int
e315cd28 5940qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 5941{
c6b2fca8 5942 int status = 0;
e315cd28 5943 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
5944 struct req_que *req = ha->req_q_map[0];
5945 struct rsp_que *rsp = ha->rsp_q_map[0];
1da177e4
LT
5946
5947 /* If firmware needs to be loaded */
e315cd28
AC
5948 if (qla2x00_isp_firmware(vha)) {
5949 vha->flags.online = 0;
5950 status = ha->isp_ops->chip_diag(vha);
5951 if (!status)
5952 status = qla2x00_setup_chip(vha);
1da177e4
LT
5953 }
5954
e315cd28
AC
5955 if (!status && !(status = qla2x00_init_rings(vha))) {
5956 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 5957 ha->flags.chip_reset_done = 1;
7108b76e 5958
73208dfd
AC
5959 /* Initialize the queues in use */
5960 qla25xx_init_queues(ha);
5961
e315cd28
AC
5962 status = qla2x00_fw_ready(vha);
5963 if (!status) {
0107109e 5964 /* Issue a marker after FW becomes ready. */
73208dfd 5965 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7108b76e 5966 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
5967 }
5968
5969 /* if no cable then assume it's good */
e315cd28 5970 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4 5971 status = 0;
1da177e4
LT
5972 }
5973 return (status);
5974}
5975
73208dfd
AC
5976static int
5977qla25xx_init_queues(struct qla_hw_data *ha)
5978{
5979 struct rsp_que *rsp = NULL;
5980 struct req_que *req = NULL;
5981 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5982 int ret = -1;
5983 int i;
5984
2afa19a9 5985 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd 5986 rsp = ha->rsp_q_map[i];
cb43285f 5987 if (rsp && test_bit(i, ha->rsp_qid_map)) {
73208dfd 5988 rsp->options &= ~BIT_0;
618a7523 5989 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 5990 if (ret != QLA_SUCCESS)
7c3df132
SK
5991 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
5992 "%s Rsp que: %d init failed.\n",
5993 __func__, rsp->id);
73208dfd 5994 else
7c3df132
SK
5995 ql_dbg(ql_dbg_init, base_vha, 0x0100,
5996 "%s Rsp que: %d inited.\n",
5997 __func__, rsp->id);
73208dfd 5998 }
2afa19a9
AC
5999 }
6000 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd 6001 req = ha->req_q_map[i];
cb43285f
QT
6002 if (req && test_bit(i, ha->req_qid_map)) {
6003 /* Clear outstanding commands array. */
73208dfd 6004 req->options &= ~BIT_0;
618a7523 6005 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 6006 if (ret != QLA_SUCCESS)
7c3df132
SK
6007 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6008 "%s Req que: %d init failed.\n",
6009 __func__, req->id);
73208dfd 6010 else
7c3df132
SK
6011 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6012 "%s Req que: %d inited.\n",
6013 __func__, req->id);
73208dfd
AC
6014 }
6015 }
6016 return ret;
6017}
6018
1da177e4
LT
6019/*
6020* qla2x00_reset_adapter
6021* Reset adapter.
6022*
6023* Input:
6024* ha = adapter block pointer.
6025*/
abbd8870 6026void
e315cd28 6027qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
6028{
6029 unsigned long flags = 0;
e315cd28 6030 struct qla_hw_data *ha = vha->hw;
3d71644c 6031 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 6032
e315cd28 6033 vha->flags.online = 0;
fd34f556 6034 ha->isp_ops->disable_intrs(ha);
1da177e4 6035
1da177e4
LT
6036 spin_lock_irqsave(&ha->hardware_lock, flags);
6037 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
6038 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
6039 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
6040 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
6041 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6042}
0107109e
AV
6043
6044void
e315cd28 6045qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
6046{
6047 unsigned long flags = 0;
e315cd28 6048 struct qla_hw_data *ha = vha->hw;
0107109e
AV
6049 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
6050
7ec0effd 6051 if (IS_P3P_TYPE(ha))
a9083016
GM
6052 return;
6053
e315cd28 6054 vha->flags.online = 0;
fd34f556 6055 ha->isp_ops->disable_intrs(ha);
0107109e
AV
6056
6057 spin_lock_irqsave(&ha->hardware_lock, flags);
6058 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
6059 RD_REG_DWORD(&reg->hccr);
6060 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
6061 RD_REG_DWORD(&reg->hccr);
6062 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
6063
6064 if (IS_NOPOLLING_TYPE(ha))
6065 ha->isp_ops->enable_intrs(ha);
0107109e
AV
6066}
6067
4e08df3f
DM
6068/* On sparc systems, obtain port and node WWN from firmware
6069 * properties.
6070 */
e315cd28
AC
6071static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
6072 struct nvram_24xx *nv)
4e08df3f
DM
6073{
6074#ifdef CONFIG_SPARC
e315cd28 6075 struct qla_hw_data *ha = vha->hw;
4e08df3f 6076 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
6077 struct device_node *dp = pci_device_to_OF_node(pdev);
6078 const u8 *val;
4e08df3f
DM
6079 int len;
6080
6081 val = of_get_property(dp, "port-wwn", &len);
6082 if (val && len >= WWN_SIZE)
6083 memcpy(nv->port_name, val, WWN_SIZE);
6084
6085 val = of_get_property(dp, "node-wwn", &len);
6086 if (val && len >= WWN_SIZE)
6087 memcpy(nv->node_name, val, WWN_SIZE);
6088#endif
6089}
6090
0107109e 6091int
e315cd28 6092qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 6093{
4e08df3f 6094 int rval;
0107109e
AV
6095 struct init_cb_24xx *icb;
6096 struct nvram_24xx *nv;
6097 uint32_t *dptr;
6098 uint8_t *dptr1, *dptr2;
6099 uint32_t chksum;
6100 uint16_t cnt;
e315cd28 6101 struct qla_hw_data *ha = vha->hw;
0107109e 6102
4e08df3f 6103 rval = QLA_SUCCESS;
0107109e 6104 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 6105 nv = ha->nvram;
0107109e
AV
6106
6107 /* Determine NVRAM starting address. */
f73cb695 6108 if (ha->port_no == 0) {
e5b68a61
AC
6109 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
6110 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
6111 } else {
0107109e 6112 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790 6113 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
6114 }
f73cb695 6115
e5b68a61
AC
6116 ha->nvram_size = sizeof(struct nvram_24xx);
6117 ha->vpd_size = FA_NVRAM_VPD_SIZE;
0107109e 6118
281afe19
SJ
6119 /* Get VPD data into cache */
6120 ha->vpd = ha->nvram + VPD_OFFSET;
e315cd28 6121 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
281afe19
SJ
6122 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
6123
6124 /* Get NVRAM data into cache and calculate checksum. */
0107109e 6125 dptr = (uint32_t *)nv;
e315cd28 6126 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
0107109e 6127 ha->nvram_size);
da08ef5c
JC
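 /* Sum every 32-bit word of the image; a valid NVRAM includes a
 * checksum word chosen so the total wraps to zero, which the
 * validity test below relies on.
 */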
6128 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6129 chksum += le32_to_cpu(*dptr);
0107109e 6130
7c3df132
SK
6131 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
6132 "Contents of NVRAM\n");
6133 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
6134 (uint8_t *)nv, ha->nvram_size);
0107109e
AV
6135
6136 /* Bad NVRAM data, set defaults parameters. */
6137 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6138 || nv->id[3] != ' ' ||
ad950360 6139 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
0107109e 6140 /* Reset NVRAM data. */
7c3df132 6141 ql_log(ql_log_warn, vha, 0x006b,
9e336520 6142 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132
SK
6143 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
6144 ql_log(ql_log_warn, vha, 0x006c,
6145 "Falling back to functioning (yet invalid -- WWPN) "
6146 "defaults.\n");
4e08df3f
DM
6147
6148 /*
6149 * Set default initialization control block.
6150 */
6151 memset(nv, 0, ha->nvram_size);
ad950360
BVA
6152 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6153 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 6154 nv->frame_payload_size = 2048;
ad950360
BVA
6155 nv->execution_throttle = cpu_to_le16(0xFFFF);
6156 nv->exchange_count = cpu_to_le16(0);
6157 nv->hard_address = cpu_to_le16(124);
4e08df3f 6158 nv->port_name[0] = 0x21;
f73cb695 6159 nv->port_name[1] = 0x00 + ha->port_no + 1;
4e08df3f
DM
6160 nv->port_name[2] = 0x00;
6161 nv->port_name[3] = 0xe0;
6162 nv->port_name[4] = 0x8b;
6163 nv->port_name[5] = 0x1c;
6164 nv->port_name[6] = 0x55;
6165 nv->port_name[7] = 0x86;
6166 nv->node_name[0] = 0x20;
6167 nv->node_name[1] = 0x00;
6168 nv->node_name[2] = 0x00;
6169 nv->node_name[3] = 0xe0;
6170 nv->node_name[4] = 0x8b;
6171 nv->node_name[5] = 0x1c;
6172 nv->node_name[6] = 0x55;
6173 nv->node_name[7] = 0x86;
e315cd28 6174 qla24xx_nvram_wwn_from_ofw(vha, nv);
ad950360
BVA
6175 nv->login_retry_count = cpu_to_le16(8);
6176 nv->interrupt_delay_timer = cpu_to_le16(0);
6177 nv->login_timeout = cpu_to_le16(0);
4e08df3f 6178 nv->firmware_options_1 =
ad950360
BVA
6179 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6180 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6181 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6182 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6183 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6184 nv->efi_parameters = cpu_to_le32(0);
4e08df3f 6185 nv->reset_delay = 5;
ad950360
BVA
6186 nv->max_luns_per_target = cpu_to_le16(128);
6187 nv->port_down_retry_count = cpu_to_le16(30);
6188 nv->link_down_timeout = cpu_to_le16(30);
4e08df3f
DM
6189
6190 rval = 1;
0107109e
AV
6191 }
6192
726b8548 6193 if (qla_tgt_mode_enabled(vha)) {
2d70c103 6194 /* Don't enable full login after initial LIP */
ad950360 6195 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
2d70c103 6196 /* Don't enable LIP full login for initiator */
ad950360 6197 nv->host_p &= cpu_to_le32(~BIT_10);
2d70c103
NB
6198 }
6199
6200 qlt_24xx_config_nvram_stage1(vha, nv);
6201
0107109e 6202 /* Reset Initialization control block */
e315cd28 6203 memset(icb, 0, ha->init_cb_size);
0107109e
AV
6204
6205 /* Copy 1st segment. */
6206 dptr1 = (uint8_t *)icb;
6207 dptr2 = (uint8_t *)&nv->version;
6208 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6209 while (cnt--)
6210 *dptr1++ = *dptr2++;
6211
6212 icb->login_retry_count = nv->login_retry_count;
3ea66e28 6213 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
6214
6215 /* Copy 2nd segment. */
6216 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6217 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6218 cnt = (uint8_t *)&icb->reserved_3 -
6219 (uint8_t *)&icb->interrupt_delay_timer;
6220 while (cnt--)
6221 *dptr1++ = *dptr2++;
6222
6223 /*
6224 * Setup driver NVRAM options.
6225 */
e315cd28 6226 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 6227 "QLA2462");
0107109e 6228
2d70c103
NB
6229 qlt_24xx_config_nvram_stage2(vha, icb);
6230
ad950360 6231 if (nv->host_p & cpu_to_le32(BIT_15)) {
2d70c103 6232 /* Use alternate WWN? */
5341e868
AV
6233 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6234 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6235 }
6236
0107109e 6237 /* Prepare nodename */
ad950360 6238 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
6239 /*
6240 * Firmware will apply the following mask if the nodename was
6241 * not provided.
6242 */
6243 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6244 icb->node_name[0] &= 0xF0;
6245 }
6246
6247 /* Set host adapter parameters. */
6248 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
6249 ha->flags.enable_lip_reset = 0;
6250 ha->flags.enable_lip_full_login =
6251 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
6252 ha->flags.enable_target_reset =
6253 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
0107109e 6254 ha->flags.enable_led_scheme = 0;
d4c760c2 6255 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
0107109e 6256
fd0e7e4d
AV
6257 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6258 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
6259
6260 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
6261 sizeof(ha->fw_seriallink_options24));
6262
6263 /* save HBA serial number */
6264 ha->serial0 = icb->port_name[5];
6265 ha->serial1 = icb->port_name[6];
6266 ha->serial2 = icb->port_name[7];
e315cd28
AC
6267 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6268 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 6269
ad950360 6270 icb->execution_throttle = cpu_to_le16(0xFFFF);
bc8fb3cb 6271
0107109e
AV
6272 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6273
6274 /* Set minimum login_timeout to 4 seconds. */
6275 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6276 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6277 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 6278 nv->login_timeout = cpu_to_le16(4);
0107109e 6279 ha->login_timeout = le16_to_cpu(nv->login_timeout);
0107109e 6280
00a537b8
AV
6281 /* Set minimum RATOV to 100 tenths of a second. */
6282 ha->r_a_tov = 100;
0107109e
AV
6283
6284 ha->loop_reset_delay = nv->reset_delay;
6285
6286 /* Link Down Timeout = 0:
6287 *
6288 * When Port Down timer expires we will start returning
6289 * I/O's to OS with "DID_NO_CONNECT".
6290 *
6291 * Link Down Timeout != 0:
6292 *
6293 * The driver waits for the link to come up after link down
6294 * before returning I/Os to OS with "DID_NO_CONNECT".
6295 */
6296 if (le16_to_cpu(nv->link_down_timeout) == 0) {
6297 ha->loop_down_abort_time =
6298 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6299 } else {
6300 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6301 ha->loop_down_abort_time =
6302 (LOOP_DOWN_TIME - ha->link_down_timeout);
6303 }
6304
6305 /* Need enough time to try and get the port back. */
6306 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6307 if (qlport_down_retry)
6308 ha->port_down_retry_count = qlport_down_retry;
6309
6310 /* Set login_retry_count */
6311 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
6312 if (ha->port_down_retry_count ==
6313 le16_to_cpu(nv->port_down_retry_count) &&
6314 ha->port_down_retry_count > 3)
6315 ha->login_retry_count = ha->port_down_retry_count;
6316 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6317 ha->login_retry_count = ha->port_down_retry_count;
6318 if (ql2xloginretrycount)
6319 ha->login_retry_count = ql2xloginretrycount;
6320
4fdfefe5 6321 /* Enable ZIO. */
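 /* ZIO mode lives in bits [3:0] of firmware_options_2; the interrupt
 * delay timer is programmed in 100-microsecond increments.
 */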
e315cd28 6322 if (!vha->flags.init_done) {
4fdfefe5
AV
6323 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6324 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6325 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6326 le16_to_cpu(icb->interrupt_delay_timer): 2;
6327 }
ad950360 6328 icb->firmware_options_2 &= cpu_to_le32(
4fdfefe5 6329 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
e315cd28 6330 vha->flags.process_response_queue = 0;
4fdfefe5 6331 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 6332 ha->zio_mode = QLA_ZIO_MODE_6;
6333
7c3df132 6334 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
6335 "ZIO mode %d enabled; timer delay (%d us).\n",
6336 ha->zio_mode, ha->zio_timer * 100);
6337
6338 icb->firmware_options_2 |= cpu_to_le32(
6339 (uint32_t)ha->zio_mode);
6340 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
e315cd28 6341 vha->flags.process_response_queue = 1;
4fdfefe5
AV
6342 }
6343
4e08df3f 6344 if (rval) {
7c3df132
SK
6345 ql_log(ql_log_warn, vha, 0x0070,
6346 "NVRAM configuration failed.\n");
4e08df3f
DM
6347 }
6348 return (rval);
0107109e
AV
6349}
6350
4243c115
SC
6351uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
6352{
6353 struct qla27xx_image_status pri_image_status, sec_image_status;
6354 uint8_t valid_pri_image, valid_sec_image;
6355 uint32_t *wptr;
6356 uint32_t cnt, chksum, size;
6357 struct qla_hw_data *ha = vha->hw;
6358
6359 valid_pri_image = valid_sec_image = 1;
6360 ha->active_image = 0;
6361 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
6362
6363 if (!ha->flt_region_img_status_pri) {
6364 valid_pri_image = 0;
6365 goto check_sec_image;
6366 }
6367
6368 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
6369 ha->flt_region_img_status_pri, size);
6370
6371 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6372 ql_dbg(ql_dbg_init, vha, 0x018b,
6373 "Primary image signature (0x%x) not valid\n",
6374 pri_image_status.signature);
6375 valid_pri_image = 0;
6376 goto check_sec_image;
6377 }
6378
6379 wptr = (uint32_t *)(&pri_image_status);
6380 cnt = size;
6381
da08ef5c
JC
6382 for (chksum = 0; cnt--; wptr++)
6383 chksum += le32_to_cpu(*wptr);
41dc529a 6384
4243c115
SC
6385 if (chksum) {
6386 ql_dbg(ql_dbg_init, vha, 0x018c,
6387 "Checksum validation failed for primary image (0x%x)\n",
6388 chksum);
6389 valid_pri_image = 0;
6390 }
6391
6392check_sec_image:
6393 if (!ha->flt_region_img_status_sec) {
6394 valid_sec_image = 0;
6395 goto check_valid_image;
6396 }
6397
6398 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
6399 ha->flt_region_img_status_sec, size);
6400
6401 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6402 ql_dbg(ql_dbg_init, vha, 0x018d,
6403 "Secondary image signature (0x%x) not valid\n",
6404 sec_image_status.signature);
6405 valid_sec_image = 0;
6406 goto check_valid_image;
6407 }
6408
6409 wptr = (uint32_t *)(&sec_image_status);
6410 cnt = size;
da08ef5c
JC
6411 for (chksum = 0; cnt--; wptr++)
6412 chksum += le32_to_cpu(*wptr);
4243c115
SC
6413 if (chksum) {
6414 ql_dbg(ql_dbg_init, vha, 0x018e,
6415 "Checksum validation failed for secondary image (0x%x)\n",
6416 chksum);
6417 valid_sec_image = 0;
6418 }
6419
6420check_valid_image:
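 /*
 * Prefer the primary image when it is valid and enabled; switch to
 * the secondary image when the primary is unusable or the secondary
 * carries a newer generation number.
 */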
6421 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
6422 ha->active_image = QLA27XX_PRIMARY_IMAGE;
6423 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
6424 if (!ha->active_image ||
6425 pri_image_status.generation_number <
6426 sec_image_status.generation_number)
6427 ha->active_image = QLA27XX_SECONDARY_IMAGE;
6428 }
6429
6430 ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
6431 ha->active_image == 0 ? "default bootld and fw" :
6432 ha->active_image == 1 ? "primary" :
6433 ha->active_image == 2 ? "secondary" :
6434 "Invalid");
6435
6436 return ha->active_image;
6437}
6438
413975a0 6439static int
cbc8eb67
AV
6440qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
6441 uint32_t faddr)
d1c61909 6442{
73208dfd 6443 int rval = QLA_SUCCESS;
d1c61909 6444 int segments, fragment;
d1c61909
AV
6445 uint32_t *dcode, dlen;
6446 uint32_t risc_addr;
6447 uint32_t risc_size;
6448 uint32_t i;
e315cd28 6449 struct qla_hw_data *ha = vha->hw;
73208dfd 6450 struct req_que *req = ha->req_q_map[0];
eaac30be 6451
7c3df132 6452 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 6453 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 6454
d1c61909
AV
6455 rval = QLA_SUCCESS;
6456
6457 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 6458 dcode = (uint32_t *)req->ring;
d1c61909
AV
6459 *srisc_addr = 0;
6460
4243c115
SC
6461 if (IS_QLA27XX(ha) &&
6462 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
6463 faddr = ha->flt_region_fw_sec;
6464
d1c61909 6465 /* Validate firmware image by checking version. */
e315cd28 6466 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
d1c61909
AV
6467 for (i = 0; i < 4; i++)
6468 dcode[i] = be32_to_cpu(dcode[i]);
6469 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6470 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6471 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6472 dcode[3] == 0)) {
7c3df132
SK
6473 ql_log(ql_log_fatal, vha, 0x008c,
6474 "Unable to verify the integrity of flash firmware "
6475 "image.\n");
6476 ql_log(ql_log_fatal, vha, 0x008d,
6477 "Firmware data: %08x %08x %08x %08x.\n",
6478 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
6479
6480 return QLA_FUNCTION_FAILED;
6481 }
6482
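 /* Each firmware segment starts with a 4-dword header: dword 2 holds
 * the RISC load address and dword 3 the segment length in dwords.
 * Segments are pushed to the adapter in fw_transfer_size chunks.
 */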
6483 while (segments && rval == QLA_SUCCESS) {
6484 /* Read segment's load information. */
e315cd28 6485 qla24xx_read_flash_data(vha, dcode, faddr, 4);
d1c61909
AV
6486
6487 risc_addr = be32_to_cpu(dcode[2]);
6488 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6489 risc_size = be32_to_cpu(dcode[3]);
6490
6491 fragment = 0;
6492 while (risc_size > 0 && rval == QLA_SUCCESS) {
6493 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6494 if (dlen > risc_size)
6495 dlen = risc_size;
6496
7c3df132
SK
6497 ql_dbg(ql_dbg_init, vha, 0x008e,
6498 "Loading risc segment@ risc addr %x "
6499 "number of dwords 0x%x offset 0x%x.\n",
6500 risc_addr, dlen, faddr);
d1c61909 6501
e315cd28 6502 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
6503 for (i = 0; i < dlen; i++)
6504 dcode[i] = swab32(dcode[i]);
6505
73208dfd 6506 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
d1c61909
AV
6507 dlen);
6508 if (rval) {
7c3df132
SK
6509 ql_log(ql_log_fatal, vha, 0x008f,
6510 "Failed to load segment %d of firmware.\n",
6511 fragment);
f261f7af 6512 return QLA_FUNCTION_FAILED;
d1c61909
AV
6513 }
6514
6515 faddr += dlen;
6516 risc_addr += dlen;
6517 risc_size -= dlen;
6518 fragment++;
6519 }
6520
6521 /* Next segment. */
6522 segments--;
6523 }
6524
f73cb695
CD
6525 if (!IS_QLA27XX(ha))
6526 return rval;
6527
6528 if (ha->fw_dump_template)
6529 vfree(ha->fw_dump_template);
6530 ha->fw_dump_template = NULL;
6531 ha->fw_dump_template_len = 0;
6532
6533 ql_dbg(ql_dbg_init, vha, 0x0161,
6534 "Loading fwdump template from %x\n", faddr);
6535 qla24xx_read_flash_data(vha, dcode, faddr, 7);
6536 risc_size = be32_to_cpu(dcode[2]);
6537 ql_dbg(ql_dbg_init, vha, 0x0162,
6538 "-> array size %x dwords\n", risc_size);
6539 if (risc_size == 0 || risc_size == ~0)
6540 goto default_template;
6541
6542 dlen = (risc_size - 8) * sizeof(*dcode);
6543 ql_dbg(ql_dbg_init, vha, 0x0163,
6544 "-> template allocating %x bytes...\n", dlen);
6545 ha->fw_dump_template = vmalloc(dlen);
6546 if (!ha->fw_dump_template) {
6547 ql_log(ql_log_warn, vha, 0x0164,
6548 "Failed fwdump template allocate %x bytes.\n", risc_size);
6549 goto default_template;
6550 }
6551
6552 faddr += 7;
6553 risc_size -= 8;
6554 dcode = ha->fw_dump_template;
6555 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
6556 for (i = 0; i < risc_size; i++)
6557 dcode[i] = le32_to_cpu(dcode[i]);
6558
6559 if (!qla27xx_fwdt_template_valid(dcode)) {
6560 ql_log(ql_log_warn, vha, 0x0165,
6561 "Failed fwdump template validate\n");
6562 goto default_template;
6563 }
6564
6565 dlen = qla27xx_fwdt_template_size(dcode);
6566 ql_dbg(ql_dbg_init, vha, 0x0166,
6567 "-> template size %x bytes\n", dlen);
6568 if (dlen > risc_size * sizeof(*dcode)) {
6569 ql_log(ql_log_warn, vha, 0x0167,
4fae52b5 6570 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 6571 (size_t)(dlen - risc_size * sizeof(*dcode)));
f73cb695
CD
6572 goto default_template;
6573 }
6574 ha->fw_dump_template_len = dlen;
6575 return rval;
6576
6577default_template:
6578 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
6579 if (ha->fw_dump_template)
6580 vfree(ha->fw_dump_template);
6581 ha->fw_dump_template = NULL;
6582 ha->fw_dump_template_len = 0;
6583
6584 dlen = qla27xx_fwdt_template_default_size();
6585 ql_dbg(ql_dbg_init, vha, 0x0169,
6586 "-> template allocating %x bytes...\n", dlen);
6587 ha->fw_dump_template = vmalloc(dlen);
6588 if (!ha->fw_dump_template) {
6589 ql_log(ql_log_warn, vha, 0x016a,
6590 "Failed fwdump template allocate %x bytes.\n", risc_size);
6591 goto failed_template;
6592 }
6593
6594 dcode = ha->fw_dump_template;
6595 risc_size = dlen / sizeof(*dcode);
6596 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
6597 for (i = 0; i < risc_size; i++)
6598 dcode[i] = be32_to_cpu(dcode[i]);
6599
6600 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6601 ql_log(ql_log_warn, vha, 0x016b,
6602 "Failed fwdump template validate\n");
6603 goto failed_template;
6604 }
6605
6606 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6607 ql_dbg(ql_dbg_init, vha, 0x016c,
6608 "-> template size %x bytes\n", dlen);
6609 ha->fw_dump_template_len = dlen;
6610 return rval;
6611
6612failed_template:
6613 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
6614 if (ha->fw_dump_template)
6615 vfree(ha->fw_dump_template);
6616 ha->fw_dump_template = NULL;
6617 ha->fw_dump_template_len = 0;
d1c61909
AV
6618 return rval;
6619}
6620
e9454a88 6621#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 6622
0107109e 6623int
e315cd28 6624qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
6625{
6626 int rval;
6627 int i, fragment;
6628 uint16_t *wcode, *fwcode;
6629 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
6630 struct fw_blob *blob;
e315cd28 6631 struct qla_hw_data *ha = vha->hw;
73208dfd 6632 struct req_que *req = ha->req_q_map[0];
5433383e
AV
6633
6634 /* Load firmware blob. */
e315cd28 6635 blob = qla2x00_request_firmware(vha);
5433383e 6636 if (!blob) {
7c3df132 6637 ql_log(ql_log_info, vha, 0x0083,
94bcf830 6638 "Firmware image unavailable.\n");
7c3df132
SK
6639 ql_log(ql_log_info, vha, 0x0084,
6640 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
6641 return QLA_FUNCTION_FAILED;
6642 }
6643
6644 rval = QLA_SUCCESS;
6645
73208dfd 6646 wcode = (uint16_t *)req->ring;
5433383e
AV
6647 *srisc_addr = 0;
6648 fwcode = (uint16_t *)blob->fw->data;
6649 fwclen = 0;
6650
6651 /* Validate firmware image by checking version. */
6652 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132 6653 ql_log(ql_log_fatal, vha, 0x0085,
5b5e0928 6654 "Unable to verify integrity of firmware image (%zd).\n",
5433383e
AV
6655 blob->fw->size);
6656 goto fail_fw_integrity;
6657 }
6658 for (i = 0; i < 4; i++)
6659 wcode[i] = be16_to_cpu(fwcode[i + 4]);
6660 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
6661 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
6662 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
6663 ql_log(ql_log_fatal, vha, 0x0086,
6664 "Unable to verify integrity of firmware image.\n");
6665 ql_log(ql_log_fatal, vha, 0x0087,
6666 "Firmware data: %04x %04x %04x %04x.\n",
6667 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
6668 goto fail_fw_integrity;
6669 }
6670
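 /* The blob consists of 16-bit word segments: blob->segs lists each
 * segment's load address, and word 3 of each segment header gives
 * its length in words.
 */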
6671 seg = blob->segs;
6672 while (*seg && rval == QLA_SUCCESS) {
6673 risc_addr = *seg;
6674 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
6675 risc_size = be16_to_cpu(fwcode[3]);
6676
6677 /* Validate firmware image size. */
6678 fwclen += risc_size * sizeof(uint16_t);
6679 if (blob->fw->size < fwclen) {
7c3df132 6680 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 6681 "Unable to verify integrity of firmware image "
5b5e0928 6682 "(%zd).\n", blob->fw->size);
5433383e
AV
6683 goto fail_fw_integrity;
6684 }
6685
6686 fragment = 0;
6687 while (risc_size > 0 && rval == QLA_SUCCESS) {
6688 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
6689 if (wlen > risc_size)
6690 wlen = risc_size;
7c3df132
SK
6691 ql_dbg(ql_dbg_init, vha, 0x0089,
6692 "Loading risc segment@ risc addr %x number of "
6693 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
6694
6695 for (i = 0; i < wlen; i++)
6696 wcode[i] = swab16(fwcode[i]);
6697
73208dfd 6698 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
6699 wlen);
6700 if (rval) {
7c3df132
SK
6701 ql_log(ql_log_fatal, vha, 0x008a,
6702 "Failed to load segment %d of firmware.\n",
6703 fragment);
5433383e
AV
6704 break;
6705 }
6706
6707 fwcode += wlen;
6708 risc_addr += wlen;
6709 risc_size -= wlen;
6710 fragment++;
6711 }
6712
6713 /* Next segment. */
6714 seg++;
6715 }
6716 return rval;
6717
6718fail_fw_integrity:
6719 return QLA_FUNCTION_FAILED;
6720}
6721
eaac30be
AV
6722static int
6723qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
6724{
6725 int rval;
6726 int segments, fragment;
6727 uint32_t *dcode, dlen;
6728 uint32_t risc_addr;
6729 uint32_t risc_size;
6730 uint32_t i;
5433383e 6731 struct fw_blob *blob;
f73cb695
CD
6732 const uint32_t *fwcode;
6733 uint32_t fwclen;
e315cd28 6734 struct qla_hw_data *ha = vha->hw;
73208dfd 6735 struct req_que *req = ha->req_q_map[0];
0107109e 6736
5433383e 6737 /* Load firmware blob. */
e315cd28 6738 blob = qla2x00_request_firmware(vha);
5433383e 6739 if (!blob) {
7c3df132 6740 ql_log(ql_log_warn, vha, 0x0090,
94bcf830 6741 "Firmware image unavailable.\n");
7c3df132
SK
6742 ql_log(ql_log_warn, vha, 0x0091,
6743 "Firmware images can be retrieved from: "
6744 QLA_FW_URL ".\n");
d1c61909 6745
eaac30be 6746 return QLA_FUNCTION_FAILED;
0107109e
AV
6747 }
6748
cfb0919c
CD
6749 ql_dbg(ql_dbg_init, vha, 0x0092,
6750 "FW: Loading via request-firmware.\n");
eaac30be 6751
0107109e
AV
6752 rval = QLA_SUCCESS;
6753
6754 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 6755 dcode = (uint32_t *)req->ring;
0107109e 6756 *srisc_addr = 0;
5433383e 6757 fwcode = (uint32_t *)blob->fw->data;
0107109e
AV
6758 fwclen = 0;
6759
6760 /* Validate firmware image by checking version. */
5433383e 6761 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7c3df132 6762 ql_log(ql_log_fatal, vha, 0x0093,
5b5e0928 6763 "Unable to verify integrity of firmware image (%zd).\n",
5433383e 6764 blob->fw->size);
f73cb695 6765 return QLA_FUNCTION_FAILED;
0107109e
AV
6766 }
6767 for (i = 0; i < 4; i++)
6768 dcode[i] = be32_to_cpu(fwcode[i + 4]);
6769 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6770 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6771 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6772 dcode[3] == 0)) {
7c3df132 6773 ql_log(ql_log_fatal, vha, 0x0094,
5b5e0928 6774 "Unable to verify integrity of firmware image (%zd).\n",
7c3df132
SK
6775 blob->fw->size);
6776 ql_log(ql_log_fatal, vha, 0x0095,
6777 "Firmware data: %08x %08x %08x %08x.\n",
6778 dcode[0], dcode[1], dcode[2], dcode[3]);
f73cb695 6779 return QLA_FUNCTION_FAILED;
0107109e
AV
6780 }
6781
6782 while (segments && rval == QLA_SUCCESS) {
6783 risc_addr = be32_to_cpu(fwcode[2]);
6784 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6785 risc_size = be32_to_cpu(fwcode[3]);
6786
6787 /* Validate firmware image size. */
6788 fwclen += risc_size * sizeof(uint32_t);
5433383e 6789 if (blob->fw->size < fwclen) {
7c3df132 6790 ql_log(ql_log_fatal, vha, 0x0096,
5433383e 6791 "Unable to verify integrity of firmware image "
5b5e0928 6792 "(%zd).\n", blob->fw->size);
f73cb695 6793 return QLA_FUNCTION_FAILED;
0107109e
AV
6794 }
6795
6796 fragment = 0;
6797 while (risc_size > 0 && rval == QLA_SUCCESS) {
6798 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6799 if (dlen > risc_size)
6800 dlen = risc_size;
6801
7c3df132
SK
6802 ql_dbg(ql_dbg_init, vha, 0x0097,
6803 "Loading risc segment@ risc addr %x "
6804 "number of dwords 0x%x.\n", risc_addr, dlen);
0107109e
AV
6805
6806 for (i = 0; i < dlen; i++)
6807 dcode[i] = swab32(fwcode[i]);
6808
73208dfd 6809 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
590f98e5 6810 dlen);
0107109e 6811 if (rval) {
7c3df132
SK
6812 ql_log(ql_log_fatal, vha, 0x0098,
6813 "Failed to load segment %d of firmware.\n",
6814 fragment);
f261f7af 6815 return QLA_FUNCTION_FAILED;
0107109e
AV
6816 }
6817
6818 fwcode += dlen;
6819 risc_addr += dlen;
6820 risc_size -= dlen;
6821 fragment++;
6822 }
6823
6824 /* Next segment. */
6825 segments--;
6826 }
f73cb695
CD
6827
6828 if (!IS_QLA27XX(ha))
6829 return rval;
6830
6831 if (ha->fw_dump_template)
6832 vfree(ha->fw_dump_template);
6833 ha->fw_dump_template = NULL;
6834 ha->fw_dump_template_len = 0;
6835
6836 ql_dbg(ql_dbg_init, vha, 0x171,
97ea702b
CD
6837 "Loading fwdump template from %x\n",
6838 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
f73cb695
CD
6839 risc_size = be32_to_cpu(fwcode[2]);
6840 ql_dbg(ql_dbg_init, vha, 0x172,
6841 "-> array size %x dwords\n", risc_size);
6842 if (risc_size == 0 || risc_size == ~0)
6843 goto default_template;
6844
6845 dlen = (risc_size - 8) * sizeof(*fwcode);
6846 ql_dbg(ql_dbg_init, vha, 0x0173,
6847 "-> template allocating %x bytes...\n", dlen);
6848 ha->fw_dump_template = vmalloc(dlen);
6849 if (!ha->fw_dump_template) {
6850 ql_log(ql_log_warn, vha, 0x0174,
6851 "Failed fwdump template allocate %x bytes.\n", risc_size);
6852 goto default_template;
6853 }
6854
6855 fwcode += 7;
6856 risc_size -= 8;
6857 dcode = ha->fw_dump_template;
6858 for (i = 0; i < risc_size; i++)
6859 dcode[i] = le32_to_cpu(fwcode[i]);
6860
6861 if (!qla27xx_fwdt_template_valid(dcode)) {
6862 ql_log(ql_log_warn, vha, 0x0175,
6863 "Failed fwdump template validate\n");
6864 goto default_template;
6865 }
6866
6867 dlen = qla27xx_fwdt_template_size(dcode);
6868 ql_dbg(ql_dbg_init, vha, 0x0176,
6869 "-> template size %x bytes\n", dlen);
6870 if (dlen > risc_size * sizeof(*fwcode)) {
6871 ql_log(ql_log_warn, vha, 0x0177,
4fae52b5 6872 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 6873 (size_t)(dlen - risc_size * sizeof(*fwcode)));
f73cb695
CD
6874 goto default_template;
6875 }
6876 ha->fw_dump_template_len = dlen;
0107109e
AV
6877 return rval;
6878
f73cb695
CD
6879default_template:
6880 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
6881 if (ha->fw_dump_template)
6882 vfree(ha->fw_dump_template);
6883 ha->fw_dump_template = NULL;
6884 ha->fw_dump_template_len = 0;
6885
6886 dlen = qla27xx_fwdt_template_default_size();
6887 ql_dbg(ql_dbg_init, vha, 0x0179,
6888 "-> template allocating %x bytes...\n", dlen);
6889 ha->fw_dump_template = vmalloc(dlen);
6890 if (!ha->fw_dump_template) {
6891 ql_log(ql_log_warn, vha, 0x017a,
6892 "Failed fwdump template allocate %x bytes.\n", risc_size);
6893 goto failed_template;
6894 }
6895
6896 dcode = ha->fw_dump_template;
6897 risc_size = dlen / sizeof(*fwcode);
6898 fwcode = qla27xx_fwdt_template_default();
6899 for (i = 0; i < risc_size; i++)
6900 dcode[i] = be32_to_cpu(fwcode[i]);
6901
6902 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6903 ql_log(ql_log_warn, vha, 0x017b,
6904 "Failed fwdump template validate\n");
6905 goto failed_template;
6906 }
6907
6908 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6909 ql_dbg(ql_dbg_init, vha, 0x017c,
6910 "-> template size %x bytes\n", dlen);
6911 ha->fw_dump_template_len = dlen;
6912 return rval;
6913
6914failed_template:
6915 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
6916 if (ha->fw_dump_template)
6917 vfree(ha->fw_dump_template);
6918 ha->fw_dump_template = NULL;
6919 ha->fw_dump_template_len = 0;
6920 return rval;
0107109e 6921}
18c6c127 6922
eaac30be
AV
6923int
6924qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6925{
6926 int rval;
6927
e337d907
AV
6928 if (ql2xfwloadbin == 1)
6929 return qla81xx_load_risc(vha, srisc_addr);
6930
eaac30be
AV
6931 /*
6932 * FW Load priority:
6933 * 1) Firmware via request-firmware interface (.bin file).
6934 * 2) Firmware residing in flash.
6935 */
6936 rval = qla24xx_load_risc_blob(vha, srisc_addr);
6937 if (rval == QLA_SUCCESS)
6938 return rval;
6939
cbc8eb67
AV
6940 return qla24xx_load_risc_flash(vha, srisc_addr,
6941 vha->hw->flt_region_fw);
eaac30be
AV
6942}
6943
6944int
6945qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6946{
6947 int rval;
cbc8eb67 6948 struct qla_hw_data *ha = vha->hw;
eaac30be 6949
e337d907 6950 if (ql2xfwloadbin == 2)
cbc8eb67 6951 goto try_blob_fw;
e337d907 6952
eaac30be
AV
6953 /*
6954 * FW Load priority:
6955 * 1) Firmware residing in flash.
6956 * 2) Firmware via request-firmware interface (.bin file).
cbc8eb67 6957 * 3) Golden-Firmware residing in flash -- limited operation.
eaac30be 6958 */
cbc8eb67 6959 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
eaac30be
AV
6960 if (rval == QLA_SUCCESS)
6961 return rval;
6962
cbc8eb67
AV
6963try_blob_fw:
6964 rval = qla24xx_load_risc_blob(vha, srisc_addr);
6965 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
6966 return rval;
6967
7c3df132
SK
6968 ql_log(ql_log_info, vha, 0x0099,
6969 "Attempting to fallback to golden firmware.\n");
cbc8eb67
AV
6970 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
6971 if (rval != QLA_SUCCESS)
6972 return rval;
6973
7c3df132 6974 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
cbc8eb67 6975 ha->flags.running_gold_fw = 1;
cbc8eb67 6976 return rval;
eaac30be
AV
6977}
6978
18c6c127 6979void
e315cd28 6980qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
6981{
6982 int ret, retries;
e315cd28 6983 struct qla_hw_data *ha = vha->hw;
18c6c127 6984
85880801
AV
6985 if (ha->flags.pci_channel_io_perm_failure)
6986 return;
e428924c 6987 if (!IS_FWI2_CAPABLE(ha))
18c6c127 6988 return;
75edf81d
AV
6989 if (!ha->fw_major_version)
6990 return;
ec7193e2
QT
6991 if (!ha->flags.fw_started)
6992 return;
18c6c127 6993
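 /* Retry the stop-firmware command a few times, resetting and
 * re-initializing the chip between attempts; give up early on a
 * timeout or an invalid-command response.
 */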
e315cd28 6994 ret = qla2x00_stop_firmware(vha);
7c7f1f29 6995 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 6996 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
6997 ha->isp_ops->reset_chip(vha);
6998 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 6999 continue;
e315cd28 7000 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 7001 continue;
7c3df132
SK
7002 ql_log(ql_log_info, vha, 0x8015,
7003 "Attempting retry of stop-firmware command.\n");
e315cd28 7004 ret = qla2x00_stop_firmware(vha);
18c6c127 7005 }
ec7193e2 7006
4b60c827 7007 QLA_FW_STOPPED(ha);
ec7193e2 7008 ha->flags.fw_init_done = 0;
18c6c127 7009}
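qla2x00_try_to_stop_firmware() retries the stop-firmware mailbox command a bounded number of times, resetting and re-initializing the chip between attempts, and gives up immediately on timeout or invalid-command status. A generic sketch of that bounded retry-with-recovery loop, with hypothetical callbacks standing in for the mailbox and chip-setup routines.

/* Retry 'op' up to 'retries' extra times; run 'recover' between attempts
 * and skip the retry when recovery itself fails. Results equal to either
 * fatal code are treated as final and never retried. */
static int retry_with_recovery(int (*op)(void *), int (*recover)(void *),
		void *ctx, int retries, int fatal1, int fatal2)
{
	int ret = op(ctx);

	while (ret != 0 && ret != fatal1 && ret != fatal2 && retries--) {
		if (recover(ctx) != 0)
			continue;
		ret = op(ctx);
	}
	return ret;
}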
2c3dfe3f
SJ
7010
7011int
e315cd28 7012qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
7013{
7014 int rval = QLA_SUCCESS;
0b91d116 7015 int rval2;
2c3dfe3f 7016 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
7017 struct qla_hw_data *ha = vha->hw;
7018 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
67c2e93a
AC
7019 struct req_que *req;
7020 struct rsp_que *rsp;
2c3dfe3f 7021
e315cd28 7022 if (!vha->vp_idx)
2c3dfe3f
SJ
7023 return -EINVAL;
7024
e315cd28 7025 rval = qla2x00_fw_ready(base_vha);
d7459527
MH
7026 if (vha->qpair)
7027 req = vha->qpair->req;
67c2e93a 7028 else
d7459527 7029 req = ha->req_q_map[0];
67c2e93a
AC
7030 rsp = req->rsp;
7031
2c3dfe3f 7032 if (rval == QLA_SUCCESS) {
e315cd28 7033 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
73208dfd 7034 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
7035 }
7036
e315cd28 7037 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
7038
7039 /* Login to SNS first */
0b91d116
CD
7040 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
7041 BIT_1);
7042 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
7043 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
7044 ql_dbg(ql_dbg_init, vha, 0x0120,
7045 "Failed SNS login: loop_id=%x, rval2=%d\n",
7046 NPH_SNS, rval2);
7047 else
7048 ql_dbg(ql_dbg_init, vha, 0x0103,
7049 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
7050 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
7051 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
7052 return (QLA_FUNCTION_FAILED);
7053 }
7054
e315cd28
AC
7055 atomic_set(&vha->loop_down_timer, 0);
7056 atomic_set(&vha->loop_state, LOOP_UP);
7057 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7058 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
7059 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
7060
7061 return rval;
7062}
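The "Login to SNS first" step above logs into the fabric name server at its well-known address, FFFFFCh, passed to fabric_login() as the bytes 0xff, 0xff, 0xfc. A small standalone sketch of packing domain/area/al_pa bytes into the 24-bit FC port ID; fc_d_id() is an illustrative helper, not a driver function.

#include <stdint.h>

/* Pack FC address bytes (domain, area, al_pa) into a 24-bit D_ID. */
static uint32_t fc_d_id(uint8_t domain, uint8_t area, uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}

/* fc_d_id(0xff, 0xff, 0xfc) == 0xFFFFFC, the directory/name server. */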
4d4df193
HK
7063
7064/* 84XX Support **************************************************************/
7065
7066static LIST_HEAD(qla_cs84xx_list);
7067static DEFINE_MUTEX(qla_cs84xx_mutex);
7068
7069static struct qla_chip_state_84xx *
e315cd28 7070qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
7071{
7072 struct qla_chip_state_84xx *cs84xx;
e315cd28 7073 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7074
7075 mutex_lock(&qla_cs84xx_mutex);
7076
7077 /* Find any shared 84xx chip. */
7078 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
7079 if (cs84xx->bus == ha->pdev->bus) {
7080 kref_get(&cs84xx->kref);
7081 goto done;
7082 }
7083 }
7084
7085 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
7086 if (!cs84xx)
7087 goto done;
7088
7089 kref_init(&cs84xx->kref);
7090 spin_lock_init(&cs84xx->access_lock);
7091 mutex_init(&cs84xx->fw_update_mutex);
7092 cs84xx->bus = ha->pdev->bus;
7093
7094 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
7095done:
7096 mutex_unlock(&qla_cs84xx_mutex);
7097 return cs84xx;
7098}
7099
7100static void
7101__qla84xx_chip_release(struct kref *kref)
7102{
7103 struct qla_chip_state_84xx *cs84xx =
7104 container_of(kref, struct qla_chip_state_84xx, kref);
7105
7106 mutex_lock(&qla_cs84xx_mutex);
7107 list_del(&cs84xx->list);
7108 mutex_unlock(&qla_cs84xx_mutex);
7109 kfree(cs84xx);
7110}
7111
7112void
e315cd28 7113qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 7114{
e315cd28 7115 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7116 if (ha->cs84xx)
7117 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
7118}
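qla84xx_get_chip()/qla84xx_put_chip() share one chip-state object per PCI bus: lookups and list insertion happen under a global mutex, and a kref counts users. A kernel-style sketch of the release half of that pattern under assumed names; the real release routine is __qla84xx_chip_release() above.

struct shared_obj {
	struct kref kref;
	struct list_head list;
};

static DEFINE_MUTEX(shared_obj_mutex);

/* Invoked by kref_put() when the last reference drops: unlink under the
 * same mutex that guards lookups, then free. */
static void shared_obj_release(struct kref *kref)
{
	struct shared_obj *o = container_of(kref, struct shared_obj, kref);

	mutex_lock(&shared_obj_mutex);
	list_del(&o->list);
	mutex_unlock(&shared_obj_mutex);
	kfree(o);
}

static void shared_obj_put(struct shared_obj *o)
{
	kref_put(&o->kref, shared_obj_release);
}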
7119
7120static int
e315cd28 7121qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
7122{
7123 int rval;
7124 uint16_t status[2];
e315cd28 7125 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7126
7127 mutex_lock(&ha->cs84xx->fw_update_mutex);
7128
e315cd28 7129 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
7130
7131 mutex_unlock(&ha->cs84xx->fw_update_mutex);
7132
7133 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7134 QLA_SUCCESS;
7135}
3a03eb79
AV
7136
7137/* 81XX Support **************************************************************/
7138
7139int
7140qla81xx_nvram_config(scsi_qla_host_t *vha)
7141{
7142 int rval;
7143 struct init_cb_81xx *icb;
7144 struct nvram_81xx *nv;
7145 uint32_t *dptr;
7146 uint8_t *dptr1, *dptr2;
7147 uint32_t chksum;
7148 uint16_t cnt;
7149 struct qla_hw_data *ha = vha->hw;
7150
7151 rval = QLA_SUCCESS;
7152 icb = (struct init_cb_81xx *)ha->init_cb;
7153 nv = ha->nvram;
7154
7155 /* Determine NVRAM starting address. */
7156 ha->nvram_size = sizeof(struct nvram_81xx);
3a03eb79 7157 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7ec0effd
AD
7158 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7159 ha->vpd_size = FA_VPD_SIZE_82XX;
3a03eb79
AV
7160
7161 /* Get VPD data into cache */
7162 ha->vpd = ha->nvram + VPD_OFFSET;
3d79038f
AV
7163 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7164 ha->vpd_size);
3a03eb79
AV
7165
7166 /* Get NVRAM data into cache and calculate checksum. */
3d79038f 7167 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
3a03eb79 7168 ha->nvram_size);
3d79038f 7169 dptr = (uint32_t *)nv;
da08ef5c
JC
7170 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7171 chksum += le32_to_cpu(*dptr);
3a03eb79 7172
7c3df132
SK
7173 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
7174 "Contents of NVRAM:\n");
7175 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
7176 (uint8_t *)nv, ha->nvram_size);
3a03eb79
AV
7177
7178 /* Bad NVRAM data, set defaults parameters. */
7179 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
7180 || nv->id[3] != ' ' ||
ad950360 7181 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
3a03eb79 7182 /* Reset NVRAM data. */
7c3df132 7183 ql_log(ql_log_info, vha, 0x0073,
9e336520 7184 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132 7185 "version=0x%x.\n", chksum, nv->id[0],
3a03eb79 7186 le16_to_cpu(nv->nvram_version));
7c3df132
SK
7187 ql_log(ql_log_info, vha, 0x0074,
7188 "Falling back to functioning (yet invalid -- WWPN) "
7189 "defaults.\n");
3a03eb79
AV
7190
7191 /*
7192 * Set default initialization control block.
7193 */
7194 memset(nv, 0, ha->nvram_size);
ad950360
BVA
7195 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7196 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 7197 nv->frame_payload_size = 2048;
ad950360
BVA
7198 nv->execution_throttle = cpu_to_le16(0xFFFF);
7199 nv->exchange_count = cpu_to_le16(0);
3a03eb79 7200 nv->port_name[0] = 0x21;
f73cb695 7201 nv->port_name[1] = 0x00 + ha->port_no + 1;
3a03eb79
AV
7202 nv->port_name[2] = 0x00;
7203 nv->port_name[3] = 0xe0;
7204 nv->port_name[4] = 0x8b;
7205 nv->port_name[5] = 0x1c;
7206 nv->port_name[6] = 0x55;
7207 nv->port_name[7] = 0x86;
7208 nv->node_name[0] = 0x20;
7209 nv->node_name[1] = 0x00;
7210 nv->node_name[2] = 0x00;
7211 nv->node_name[3] = 0xe0;
7212 nv->node_name[4] = 0x8b;
7213 nv->node_name[5] = 0x1c;
7214 nv->node_name[6] = 0x55;
7215 nv->node_name[7] = 0x86;
ad950360
BVA
7216 nv->login_retry_count = cpu_to_le16(8);
7217 nv->interrupt_delay_timer = cpu_to_le16(0);
7218 nv->login_timeout = cpu_to_le16(0);
3a03eb79 7219 nv->firmware_options_1 =
ad950360
BVA
7220 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7221 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7222 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7223 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7224 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7225 nv->efi_parameters = cpu_to_le32(0);
3a03eb79 7226 nv->reset_delay = 5;
ad950360
BVA
7227 nv->max_luns_per_target = cpu_to_le16(128);
7228 nv->port_down_retry_count = cpu_to_le16(30);
7229 nv->link_down_timeout = cpu_to_le16(180);
eeebcc92 7230 nv->enode_mac[0] = 0x00;
6246b8a1
GM
7231 nv->enode_mac[1] = 0xC0;
7232 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
7233 nv->enode_mac[3] = 0x04;
7234 nv->enode_mac[4] = 0x05;
f73cb695 7235 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7236
7237 rval = 1;
7238 }
7239
9e522cd8
AE
7240 if (IS_T10_PI_CAPABLE(ha))
7241 nv->frame_payload_size &= ~7;
7242
aa230bc5
AE
7243 qlt_81xx_config_nvram_stage1(vha, nv);
7244
3a03eb79 7245 /* Reset Initialization control block */
773120e4 7246 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
7247
7248 /* Copy 1st segment. */
7249 dptr1 = (uint8_t *)icb;
7250 dptr2 = (uint8_t *)&nv->version;
7251 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7252 while (cnt--)
7253 *dptr1++ = *dptr2++;
7254
7255 icb->login_retry_count = nv->login_retry_count;
7256
7257 /* Copy 2nd segment. */
7258 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7259 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7260 cnt = (uint8_t *)&icb->reserved_5 -
7261 (uint8_t *)&icb->interrupt_delay_timer;
7262 while (cnt--)
7263 *dptr1++ = *dptr2++;
7264
7265 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
7266 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
7267 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
7268 icb->enode_mac[0] = 0x00;
7269 icb->enode_mac[1] = 0xC0;
7270 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
7271 icb->enode_mac[3] = 0x04;
7272 icb->enode_mac[4] = 0x05;
f73cb695 7273 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7274 }
7275
b64b0e8f
AV
7276 /* Use extended-initialization control block. */
7277 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7278
3a03eb79
AV
7279 /*
7280 * Setup driver NVRAM options.
7281 */
7282 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 7283 "QLE8XXX");
3a03eb79 7284
aa230bc5
AE
7285 qlt_81xx_config_nvram_stage2(vha, icb);
7286
3a03eb79 7287 /* Use alternate WWN? */
ad950360 7288 if (nv->host_p & cpu_to_le32(BIT_15)) {
3a03eb79
AV
7289 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7290 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7291 }
7292
7293 /* Prepare nodename */
ad950360 7294 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
3a03eb79
AV
7295 /*
7296 * Firmware will apply the following mask if the nodename was
7297 * not provided.
7298 */
7299 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7300 icb->node_name[0] &= 0xF0;
7301 }
7302
7303 /* Set host adapter parameters. */
7304 ha->flags.disable_risc_code_load = 0;
7305 ha->flags.enable_lip_reset = 0;
7306 ha->flags.enable_lip_full_login =
7307 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7308 ha->flags.enable_target_reset =
7309 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7310 ha->flags.enable_led_scheme = 0;
7311 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7312
7313 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7314 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7315
7316 /* save HBA serial number */
7317 ha->serial0 = icb->port_name[5];
7318 ha->serial1 = icb->port_name[6];
7319 ha->serial2 = icb->port_name[7];
7320 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7321 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7322
ad950360 7323 icb->execution_throttle = cpu_to_le16(0xFFFF);
3a03eb79
AV
7324
7325 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7326
7327 /* Set minimum login_timeout to 4 seconds. */
7328 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7329 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7330 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 7331 nv->login_timeout = cpu_to_le16(4);
3a03eb79 7332 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3a03eb79
AV
7333
7334 /* Set minimum RATOV to 100 tenths of a second. */
7335 ha->r_a_tov = 100;
7336
7337 ha->loop_reset_delay = nv->reset_delay;
7338
7339 /* Link Down Timeout = 0:
7340 *
7ec0effd 7341 * When Port Down timer expires we will start returning
3a03eb79
AV
7342 * I/Os to the OS with "DID_NO_CONNECT".
7343 *
7344 * Link Down Timeout != 0:
7345 *
7346 * The driver waits for the link to come up after link down
7347 * before returning I/Os to OS with "DID_NO_CONNECT".
7348 */
7349 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7350 ha->loop_down_abort_time =
7351 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7352 } else {
7353 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7354 ha->loop_down_abort_time =
7355 (LOOP_DOWN_TIME - ha->link_down_timeout);
7356 }
7357
7358 /* Need enough time to try and get the port back. */
7359 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7360 if (qlport_down_retry)
7361 ha->port_down_retry_count = qlport_down_retry;
7362
7363 /* Set login_retry_count */
7364 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7365 if (ha->port_down_retry_count ==
7366 le16_to_cpu(nv->port_down_retry_count) &&
7367 ha->port_down_retry_count > 3)
7368 ha->login_retry_count = ha->port_down_retry_count;
7369 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7370 ha->login_retry_count = ha->port_down_retry_count;
7371 if (ql2xloginretrycount)
7372 ha->login_retry_count = ql2xloginretrycount;
7373
6246b8a1 7374 /* if not running MSI-X we need handshaking on interrupts */
f73cb695 7375 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
ad950360 7376 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6246b8a1 7377
3a03eb79
AV
7378 /* Enable ZIO. */
7379 if (!vha->flags.init_done) {
7380 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7381 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7382 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7383 le16_to_cpu(icb->interrupt_delay_timer): 2;
7384 }
ad950360 7385 icb->firmware_options_2 &= cpu_to_le32(
3a03eb79
AV
7386 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7387 vha->flags.process_response_queue = 0;
7388 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7389 ha->zio_mode = QLA_ZIO_MODE_6;
7390
7c3df132 7391 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 7392 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
7393 ha->zio_mode,
7394 ha->zio_timer * 100);
3a03eb79
AV
7395
7396 icb->firmware_options_2 |= cpu_to_le32(
7397 (uint32_t)ha->zio_mode);
7398 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7399 vha->flags.process_response_queue = 1;
7400 }
7401
41dc529a
QT
7402 /* enable RIDA Format2 */
7403 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7404 icb->firmware_options_3 |= BIT_0;
7405
3a03eb79 7406 if (rval) {
7c3df132
SK
7407 ql_log(ql_log_warn, vha, 0x0076,
7408 "NVRAM configuration failed.\n");
3a03eb79
AV
7409 }
7410 return (rval);
7411}
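The NVRAM sanity check near the top of this function sums the image as little-endian 32-bit words and falls back to defaults whenever the sum is non-zero or the "ISP " signature/version is wrong. A standalone sketch of that checksum; the helper name and buffer handling are illustrative.

#include <stdint.h>
#include <stddef.h>

/* Sum an NVRAM image as little-endian 32-bit words. A consistent image
 * (its stored checksum word included) sums to zero modulo 2^32; any
 * non-zero result means "fall back to default parameters". */
static uint32_t nvram_checksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 4 <= len; i += 4)
		sum += (uint32_t)buf[i] |
		       ((uint32_t)buf[i + 1] << 8) |
		       ((uint32_t)buf[i + 2] << 16) |
		       ((uint32_t)buf[i + 3] << 24);
	return sum;
}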
7412
a9083016
GM
7413int
7414qla82xx_restart_isp(scsi_qla_host_t *vha)
7415{
7416 int status, rval;
a9083016
GM
7417 struct qla_hw_data *ha = vha->hw;
7418 struct req_que *req = ha->req_q_map[0];
7419 struct rsp_que *rsp = ha->rsp_q_map[0];
7420 struct scsi_qla_host *vp;
feafb7b1 7421 unsigned long flags;
a9083016
GM
7422
7423 status = qla2x00_init_rings(vha);
7424 if (!status) {
7425 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7426 ha->flags.chip_reset_done = 1;
7427
7428 status = qla2x00_fw_ready(vha);
7429 if (!status) {
a9083016
GM
7430 /* Issue a marker after FW becomes ready. */
7431 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
a9083016 7432 vha->flags.online = 1;
7108b76e 7433 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
a9083016
GM
7434 }
7435
7436 /* if no cable then assume it's good */
7437 if ((vha->device_flags & DFLG_NO_CABLE))
7438 status = 0;
a9083016
GM
7439 }
7440
7441 if (!status) {
7442 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7443
7444 if (!atomic_read(&vha->loop_down_timer)) {
7445 /*
7446 * Issue marker command only when we are going
7448 * to start the I/O.
7448 */
7449 vha->marker_needed = 1;
7450 }
7451
a9083016
GM
7452 ha->isp_ops->enable_intrs(ha);
7453
7454 ha->isp_abort_cnt = 0;
7455 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7456
53296788 7457 /* Update the firmware version */
3173167f 7458 status = qla82xx_check_md_needed(vha);
53296788 7459
a9083016
GM
7460 if (ha->fce) {
7461 ha->flags.fce_enabled = 1;
7462 memset(ha->fce, 0,
7463 fce_calc_size(ha->fce_bufs));
7464 rval = qla2x00_enable_fce_trace(vha,
7465 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7466 &ha->fce_bufs);
7467 if (rval) {
cfb0919c 7468 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
7469 "Unable to reinitialize FCE (%d).\n",
7470 rval);
a9083016
GM
7471 ha->flags.fce_enabled = 0;
7472 }
7473 }
7474
7475 if (ha->eft) {
7476 memset(ha->eft, 0, EFT_SIZE);
7477 rval = qla2x00_enable_eft_trace(vha,
7478 ha->eft_dma, EFT_NUM_BUFFERS);
7479 if (rval) {
cfb0919c 7480 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
7481 "Unable to reinitialize EFT (%d).\n",
7482 rval);
a9083016
GM
7483 }
7484 }
a9083016
GM
7485 }
7486
7487 if (!status) {
cfb0919c 7488 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 7489 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
7490
7491 spin_lock_irqsave(&ha->vport_slock, flags);
7492 list_for_each_entry(vp, &ha->vp_list, list) {
7493 if (vp->vp_idx) {
7494 atomic_inc(&vp->vref_count);
7495 spin_unlock_irqrestore(&ha->vport_slock, flags);
7496
a9083016 7497 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
7498
7499 spin_lock_irqsave(&ha->vport_slock, flags);
7500 atomic_dec(&vp->vref_count);
7501 }
a9083016 7502 }
feafb7b1
AE
7503 spin_unlock_irqrestore(&ha->vport_slock, flags);
7504
a9083016 7505 } else {
cfb0919c 7506 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 7507 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
7508 }
7509
7510 return status;
7511}
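The vport walk at the end of qla82xx_restart_isp() pins each vport via vref_count, drops ha->vport_slock for the potentially slow qla2x00_vp_abort_isp() call, then retakes the lock before continuing the walk. A kernel-style sketch of that reference/unlock/work/relock iteration; struct vp_item and the callback are assumptions.

struct vp_item {
	struct list_head list;
	atomic_t ref;		/* keeps the entry alive while unlocked */
};

/* Walk a spinlock-protected list while calling a handler that must run
 * without the lock held: pin the element first so it cannot disappear,
 * drop the lock for the call, then retake it and unpin. */
static void walk_and_work(struct list_head *head, spinlock_t *lock,
		void (*work)(struct vp_item *))
{
	struct vp_item *vp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(vp, head, list) {
		atomic_inc(&vp->ref);
		spin_unlock_irqrestore(lock, flags);

		work(vp);	/* lock not held here */

		spin_lock_irqsave(lock, flags);
		atomic_dec(&vp->ref);
	}
	spin_unlock_irqrestore(lock, flags);
}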
7512
3a03eb79 7513void
ae97c91e 7514qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 7515{
ae97c91e
AV
7516 struct qla_hw_data *ha = vha->hw;
7517
f198cafa
HM
7518 /* Hold status IOCBs until ABTS response received. */
7519 if (ql2xfwholdabts)
7520 ha->fw_options[3] |= BIT_12;
7521
088d09d4
GM
7522 /* Set Retry FLOGI in case of P2P connection */
7523 if (ha->operating_mode == P2P) {
7524 ha->fw_options[2] |= BIT_3;
7525 ql_dbg(ql_dbg_disc, vha, 0x2103,
7526 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
7527 __func__, ha->fw_options[2]);
7528 }
7529
41dc529a
QT
7530 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
7531 if (ql2xmvasynctoatio) {
7532 if (qla_tgt_mode_enabled(vha) ||
7533 qla_dual_mode_enabled(vha))
7534 ha->fw_options[2] |= BIT_11;
7535 else
7536 ha->fw_options[2] &= ~BIT_11;
7537 }
7538
f7e761f5 7539 if (qla_tgt_mode_enabled(vha) ||
2da52737
QT
7540 qla_dual_mode_enabled(vha)) {
7541 /* FW auto-sends SCSI status during these modes */
7542 ha->fw_options[1] |= BIT_8;
7543 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
7544
7545 /* FW performs Exchange validation */
f7e761f5 7546 ha->fw_options[2] |= BIT_4;
2da52737
QT
7547 } else {
7548 ha->fw_options[1] &= ~BIT_8;
7549 ha->fw_options[10] &= 0x00ff;
7550
f7e761f5 7551 ha->fw_options[2] &= ~BIT_4;
2da52737 7552 }
f7e761f5 7553
41dc529a
QT
7554 if (ql2xetsenable) {
7555 /* Enable ETS Burst. */
7556 memset(ha->fw_options, 0, sizeof(ha->fw_options));
7557 ha->fw_options[2] |= BIT_9;
7558 }
7559
83548fe2
QT
7560 ql_dbg(ql_dbg_init, vha, 0x00e9,
7561 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
7562 __func__, ha->fw_options[1], ha->fw_options[2],
7563 ha->fw_options[3], vha->host->active_mode);
ae97c91e 7564
ae97c91e 7565 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 7566}
09ff701a
SR
7567
7568/*
7569 * qla24xx_get_fcp_prio
7570 * Gets the fcp cmd priority value for the logged in port.
7571 * Looks for a match of the port descriptors within
7572 * each of the fcp prio config entries. If a match is found,
7573 * the tag (priority) value is returned.
7574 *
7575 * Input:
21090cbe 7576 * vha = scsi host structure pointer.
09ff701a
SR
7577 * fcport = port structure pointer.
7578 *
7579 * Return:
6c452a45 7580 * non-zero (if found)
f28a0a96 7581 * -1 (if not found)
09ff701a
SR
7582 *
7583 * Context:
7584 * Kernel context
7585 */
f28a0a96 7586static int
09ff701a
SR
7587qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7588{
7589 int i, entries;
7590 uint8_t pid_match, wwn_match;
f28a0a96 7591 int priority;
09ff701a
SR
7592 uint32_t pid1, pid2;
7593 uint64_t wwn1, wwn2;
7594 struct qla_fcp_prio_entry *pri_entry;
7595 struct qla_hw_data *ha = vha->hw;
7596
7597 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 7598 return -1;
09ff701a 7599
f28a0a96 7600 priority = -1;
09ff701a
SR
7601 entries = ha->fcp_prio_cfg->num_entries;
7602 pri_entry = &ha->fcp_prio_cfg->entry[0];
7603
7604 for (i = 0; i < entries; i++) {
7605 pid_match = wwn_match = 0;
7606
7607 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
7608 pri_entry++;
7609 continue;
7610 }
7611
7612 /* check source pid for a match */
7613 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
7614 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
7615 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
7616 if (pid1 == INVALID_PORT_ID)
7617 pid_match++;
7618 else if (pid1 == pid2)
7619 pid_match++;
7620 }
7621
7622 /* check destination pid for a match */
7623 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
7624 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
7625 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
7626 if (pid1 == INVALID_PORT_ID)
7627 pid_match++;
7628 else if (pid1 == pid2)
7629 pid_match++;
7630 }
7631
7632 /* check source WWN for a match */
7633 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
7634 wwn1 = wwn_to_u64(vha->port_name);
7635 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
7636 if (wwn2 == (uint64_t)-1)
7637 wwn_match++;
7638 else if (wwn1 == wwn2)
7639 wwn_match++;
7640 }
7641
7642 /* check destination WWN for a match */
7643 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
7644 wwn1 = wwn_to_u64(fcport->port_name);
7645 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
7646 if (wwn2 == (uint64_t)-1)
7647 wwn_match++;
7648 else if (wwn1 == wwn2)
7649 wwn_match++;
7650 }
7651
7652 if (pid_match == 2 || wwn_match == 2) {
7653 /* Found a matching entry */
7654 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
7655 priority = pri_entry->tag;
7656 break;
7657 }
7658
7659 pri_entry++;
7660 }
7661
7662 return priority;
7663}
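The matching rule implemented above: a priority entry applies when both its source and destination port IDs match the session (pid_match == 2) or both WWPNs match (wwn_match == 2), with an all-ones field acting as a wildcard. A compact standalone sketch of that two-of-two rule; it deliberately ignores the per-field VALID flags for brevity, and every name in it is illustrative.

#include <stdint.h>
#include <stdbool.h>

/* An all-ones entry field matches anything (wildcard). */
static bool field_matches(uint64_t entry_val, uint64_t actual, uint64_t wildcard)
{
	return entry_val == wildcard || entry_val == actual;
}

/* The entry applies only if BOTH port ids match or BOTH WWPNs match. */
static bool prio_entry_applies(uint32_t e_src_id, uint32_t e_dst_id,
		uint64_t e_src_wwn, uint64_t e_dst_wwn,
		uint32_t src_id, uint32_t dst_id,
		uint64_t src_wwn, uint64_t dst_wwn)
{
	bool ids = field_matches(e_src_id, src_id, 0xFFFFFF) &&
		   field_matches(e_dst_id, dst_id, 0xFFFFFF);
	bool wwns = field_matches(e_src_wwn, src_wwn, UINT64_MAX) &&
		    field_matches(e_dst_wwn, dst_wwn, UINT64_MAX);

	return ids || wwns;
}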
7664
7665/*
7666 * qla24xx_update_fcport_fcp_prio
7667 * Activates fcp priority for the logged in fc port
7668 *
7669 * Input:
21090cbe 7670 * vha = scsi host structure pointer.
09ff701a
SR
7671 * fcport = port structure pointer.
7672 *
7673 * Return:
7674 * QLA_SUCCESS or QLA_FUNCTION_FAILED
7675 *
7676 * Context:
7677 * Kernel context.
7678 */
7679int
21090cbe 7680qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
7681{
7682 int ret;
f28a0a96 7683 int priority;
09ff701a
SR
7684 uint16_t mb[5];
7685
21090cbe
MI
7686 if (fcport->port_type != FCT_TARGET ||
7687 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
7688 return QLA_FUNCTION_FAILED;
7689
21090cbe 7690 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
7691 if (priority < 0)
7692 return QLA_FUNCTION_FAILED;
7693
7ec0effd 7694 if (IS_P3P_TYPE(vha->hw)) {
a00f6296
SK
7695 fcport->fcp_prio = priority & 0xf;
7696 return QLA_SUCCESS;
7697 }
7698
21090cbe 7699 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
7700 if (ret == QLA_SUCCESS) {
7701 if (fcport->fcp_prio != priority)
7702 ql_dbg(ql_dbg_user, vha, 0x709e,
7703 "Updated FCP_CMND priority - value=%d loop_id=%d "
7704 "port_id=%02x%02x%02x.\n", priority,
7705 fcport->loop_id, fcport->d_id.b.domain,
7706 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 7707 fcport->fcp_prio = priority & 0xf;
cfb0919c 7708 } else
7c3df132 7709 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
7710 "Unable to update FCP_CMND priority - ret=0x%x for "
7711 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
7712 fcport->d_id.b.domain, fcport->d_id.b.area,
7713 fcport->d_id.b.al_pa);
09ff701a
SR
7714 return ret;
7715}
7716
7717/*
7718 * qla24xx_update_all_fcp_prio
7719 * Activates fcp priority for all the logged in ports
7720 *
7721 * Input:
7722 * ha = adapter block pointer.
7723 *
7724 * Return:
7725 * QLA_SUCCESS or QLA_FUNCTION_FAILED
7726 *
7727 * Context:
7728 * Kernel context.
7729 */
7730int
7731qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
7732{
7733 int ret;
7734 fc_port_t *fcport;
7735
7736 ret = QLA_FUNCTION_FAILED;
7737 /* We need to set priority for all logged in ports */
7738 list_for_each_entry(fcport, &vha->vp_fcports, list)
7739 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
7740
7741 return ret;
7742}
d7459527 7743
82de802a
QT
7744struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
7745 int vp_idx, bool startqp)
d7459527
MH
7746{
7747 int rsp_id = 0;
7748 int req_id = 0;
7749 int i;
7750 struct qla_hw_data *ha = vha->hw;
7751 uint16_t qpair_id = 0;
7752 struct qla_qpair *qpair = NULL;
7753 struct qla_msix_entry *msix;
7754
7755 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
7756 ql_log(ql_log_warn, vha, 0x00181,
7757 "FW/Driver is not multi-queue capable.\n");
7758 return NULL;
7759 }
7760
7761 if (ql2xmqsupport) {
7762 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
7763 if (qpair == NULL) {
7764 ql_log(ql_log_warn, vha, 0x0182,
7765 "Failed to allocate memory for queue pair.\n");
7766 return NULL;
7767 }
7768 memset(qpair, 0, sizeof(struct qla_qpair));
7769
7770 qpair->hw = vha->hw;
25ff6af1 7771 qpair->vha = vha;
82de802a
QT
7772 qpair->qp_lock_ptr = &qpair->qp_lock;
7773 spin_lock_init(&qpair->qp_lock);
af7bb382 7774 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
d7459527
MH
7775
7776 /* Assign available que pair id */
7777 mutex_lock(&ha->mq_lock);
7778 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
b95b9452 7779 if (ha->num_qpairs >= ha->max_qpairs) {
d7459527
MH
7780 mutex_unlock(&ha->mq_lock);
7781 ql_log(ql_log_warn, vha, 0x0183,
7782 "No resources to create additional q pair.\n");
7783 goto fail_qid_map;
7784 }
b95b9452 7785 ha->num_qpairs++;
d7459527
MH
7786 set_bit(qpair_id, ha->qpair_qid_map);
7787 ha->queue_pair_map[qpair_id] = qpair;
7788 qpair->id = qpair_id;
7789 qpair->vp_idx = vp_idx;
e326d22a 7790 INIT_LIST_HEAD(&qpair->hints_list);
7c3f8fd1
QT
7791 qpair->chip_reset = ha->base_qpair->chip_reset;
7792 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
7793 qpair->enable_explicit_conf =
7794 ha->base_qpair->enable_explicit_conf;
d7459527
MH
7795
7796 for (i = 0; i < ha->msix_count; i++) {
093df737 7797 msix = &ha->msix_entries[i];
d7459527
MH
7798 if (msix->in_use)
7799 continue;
7800 qpair->msix = msix;
83548fe2 7801 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
d7459527
MH
7802 "Vector %x selected for qpair\n", msix->vector);
7803 break;
7804 }
7805 if (!qpair->msix) {
7806 ql_log(ql_log_warn, vha, 0x0184,
7807 "Out of MSI-X vectors!.\n");
7808 goto fail_msix;
7809 }
7810
7811 qpair->msix->in_use = 1;
7812 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8abfa9e2
QT
7813 qpair->pdev = ha->pdev;
7814 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
7815 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
d7459527
MH
7816
7817 mutex_unlock(&ha->mq_lock);
7818
7819 /* Create response queue first */
82de802a 7820 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
d7459527
MH
7821 if (!rsp_id) {
7822 ql_log(ql_log_warn, vha, 0x0185,
7823 "Failed to create response queue.\n");
7824 goto fail_rsp;
7825 }
7826
7827 qpair->rsp = ha->rsp_q_map[rsp_id];
7828
7829 /* Create request queue */
82de802a
QT
7830 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
7831 startqp);
d7459527
MH
7832 if (!req_id) {
7833 ql_log(ql_log_warn, vha, 0x0186,
7834 "Failed to create request queue.\n");
7835 goto fail_req;
7836 }
7837
7838 qpair->req = ha->req_q_map[req_id];
7839 qpair->rsp->req = qpair->req;
82de802a 7840 qpair->rsp->qpair = qpair;
e326d22a
QT
7841 /* init qpair to this cpu. Will adjust at run time. */
7842 qla_cpu_update(qpair, smp_processor_id());
d7459527
MH
7843
7844 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
7845 if (ha->fw_attributes & BIT_4)
7846 qpair->difdix_supported = 1;
7847 }
7848
7849 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
7850 if (!qpair->srb_mempool) {
83548fe2 7851 ql_log(ql_log_warn, vha, 0xd036,
d7459527
MH
7852 "Failed to create srb mempool for qpair %d\n",
7853 qpair->id);
7854 goto fail_mempool;
7855 }
7856
7857 /* Mark as online */
7858 qpair->online = 1;
7859
7860 if (!vha->flags.qpairs_available)
7861 vha->flags.qpairs_available = 1;
7862
7863 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
7864 "Request/Response queue pair created, id %d\n",
7865 qpair->id);
7866 ql_dbg(ql_dbg_init, vha, 0x0187,
7867 "Request/Response queue pair created, id %d\n",
7868 qpair->id);
7869 }
7870 return qpair;
7871
7872fail_mempool:
7873fail_req:
7874 qla25xx_delete_rsp_que(vha, qpair->rsp);
7875fail_rsp:
7876 mutex_lock(&ha->mq_lock);
7877 qpair->msix->in_use = 0;
7878 list_del(&qpair->qp_list_elem);
7879 if (list_empty(&vha->qp_list))
7880 vha->flags.qpairs_available = 0;
7881fail_msix:
7882 ha->queue_pair_map[qpair_id] = NULL;
7883 clear_bit(qpair_id, ha->qpair_qid_map);
b95b9452 7884 ha->num_qpairs--;
d7459527
MH
7885 mutex_unlock(&ha->mq_lock);
7886fail_qid_map:
7887 kfree(qpair);
7888 return NULL;
7889}
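Queue-pair IDs above come from a small bitmap guarded by ha->mq_lock: find_first_zero_bit() picks the lowest free slot, set_bit() claims it, and clear_bit() returns it on the error path. A standalone sketch of the same allocator over a single 64-bit word (so at most 64 ids); the names are illustrative.

#include <stdint.h>

/* Lowest-free-id allocator in the spirit of find_first_zero_bit()/set_bit().
 * Returns the claimed id, or -1 when all 'max' ids (max <= 64) are in use. */
static int qpair_id_alloc(uint64_t *map, int max)
{
	int id;

	for (id = 0; id < max; id++) {
		if (!(*map & ((uint64_t)1 << id))) {
			*map |= (uint64_t)1 << id;
			return id;
		}
	}
	return -1;
}

/* Error-path counterpart of clear_bit() in the function above. */
static void qpair_id_free(uint64_t *map, int id)
{
	*map &= ~((uint64_t)1 << id);
}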
7890
7891int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
7892{
d65237c7 7893 int ret = QLA_FUNCTION_FAILED;
d7459527
MH
7894 struct qla_hw_data *ha = qpair->hw;
7895
d65237c7
SC
7896 if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
7897 goto fail;
7898
d7459527
MH
7899 qpair->delete_in_progress = 1;
7900 while (atomic_read(&qpair->ref_count))
7901 msleep(500);
7902
7903 ret = qla25xx_delete_req_que(vha, qpair->req);
7904 if (ret != QLA_SUCCESS)
7905 goto fail;
7906 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
7907 if (ret != QLA_SUCCESS)
7908 goto fail;
7909
7910 mutex_lock(&ha->mq_lock);
7911 ha->queue_pair_map[qpair->id] = NULL;
7912 clear_bit(qpair->id, ha->qpair_qid_map);
b95b9452 7913 ha->num_qpairs--;
d7459527 7914 list_del(&qpair->qp_list_elem);
d65237c7 7915 if (list_empty(&vha->qp_list)) {
d7459527 7916 vha->flags.qpairs_available = 0;
d65237c7
SC
7917 vha->flags.qpairs_req_created = 0;
7918 vha->flags.qpairs_rsp_created = 0;
7919 }
d7459527
MH
7920 mempool_destroy(qpair->srb_mempool);
7921 kfree(qpair);
7922 mutex_unlock(&ha->mq_lock);
7923
7924 return QLA_SUCCESS;
7925fail:
7926 return ret;
7927}
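qla2xxx_delete_qpair() first publishes delete_in_progress, then polls until the qpair's reference count drains, and only then deletes the request/response queues and releases the ID. A standalone sketch of that drain-then-teardown ordering; the struct and callbacks are assumptions (the driver polls with msleep(500)).

#include <stdatomic.h>

struct qp_sketch {
	_Atomic int ref_count;		/* users still issuing I/O */
	int delete_in_progress;		/* new users must check this first */
};

/* Publish "deleting", wait for in-flight references to drop, then run
 * the real teardown (delete queues, release the id, free memory). */
static int drain_and_delete(struct qp_sketch *qp,
		int (*teardown)(struct qp_sketch *),
		void (*wait_a_bit)(void))
{
	qp->delete_in_progress = 1;
	while (atomic_load(&qp->ref_count))
		wait_a_bit();
	return teardown(qp);
}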