scsi: qla2xxx: Really fix qla2xxx_eh_abort()
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_init.c
CommitLineData
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
bd21eaf9 3 * Copyright (c) 2003-2014 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
73208dfd 8#include "qla_gbl.h"
1da177e4
LT
9
10#include <linux/delay.h>
5a0e3ad6 11#include <linux/slab.h>
0107109e 12#include <linux/vmalloc.h>
1da177e4
LT
13
14#include "qla_devtbl.h"
15
4e08df3f
DM
16#ifdef CONFIG_SPARC
17#include <asm/prom.h>
4e08df3f
DM
18#endif
19
2d70c103
NB
20#include <target/target_core_base.h>
21#include "qla_target.h"
22
1da177e4
LT
23/*
24* QLogic ISP2x00 Hardware Support Function Prototypes.
25*/
1da177e4 26static int qla2x00_isp_firmware(scsi_qla_host_t *);
1da177e4 27static int qla2x00_setup_chip(scsi_qla_host_t *);
1da177e4
LT
28static int qla2x00_fw_ready(scsi_qla_host_t *);
29static int qla2x00_configure_hba(scsi_qla_host_t *);
1da177e4
LT
30static int qla2x00_configure_loop(scsi_qla_host_t *);
31static int qla2x00_configure_local_loop(scsi_qla_host_t *);
1da177e4 32static int qla2x00_configure_fabric(scsi_qla_host_t *);
726b8548 33static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
1da177e4 34static int qla2x00_restart_isp(scsi_qla_host_t *);
1da177e4 35
4d4df193
HK
36static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37static int qla84xx_init_chip(scsi_qla_host_t *);
73208dfd 38static int qla25xx_init_queues(struct qla_hw_data *);
a5d42f4c 39static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
726b8548
QT
40static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
41 struct event_arg *);
a5d42f4c
DG
42static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
43 struct event_arg *);
a4239945 44static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
4d4df193 45
ac280b67
AV
46/* SRB Extensions ---------------------------------------------------------- */
47
9ba56b95 48void
8e5f4ba0 49qla2x00_sp_timeout(struct timer_list *t)
ac280b67 50{
8e5f4ba0 51 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
4916392b 52 struct srb_iocb *iocb;
ac280b67
AV
53 struct req_que *req;
54 unsigned long flags;
bcc71cc3 55 struct qla_hw_data *ha = sp->vha->hw;
ac280b67 56
ef801f07 57 WARN_ON_ONCE(irqs_disabled());
bcc71cc3 58 spin_lock_irqsave(&ha->hardware_lock, flags);
f6145e86 59 req = sp->qpair->req;
ac280b67 60 req->outstanding_cmds[sp->handle] = NULL;
9ba56b95 61 iocb = &sp->u.iocb_cmd;
bcc71cc3 62 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4916392b 63 iocb->timeout(sp);
ac280b67
AV
64}
65
9ba56b95 66void
25ff6af1 67qla2x00_sp_free(void *ptr)
ac280b67 68{
25ff6af1 69 srb_t *sp = ptr;
9ba56b95 70 struct srb_iocb *iocb = &sp->u.iocb_cmd;
ac280b67 71
4d97cc53 72 del_timer(&iocb->timer);
25ff6af1 73 qla2x00_rel_sp(sp);
ac280b67
AV
74}
75
ac280b67
AV
76/* Asynchronous Login/Logout Routines -------------------------------------- */
77
a9b6f722 78unsigned long
5b91490e
AV
79qla2x00_get_async_timeout(struct scsi_qla_host *vha)
80{
81 unsigned long tmo;
82 struct qla_hw_data *ha = vha->hw;
83
84 /* Firmware should use switch negotiated r_a_tov for timeout. */
85 tmo = ha->r_a_tov / 10 * 2;
8ae6d9c7
GM
86 if (IS_QLAFX00(ha)) {
87 tmo = FX00_DEF_RATOV * 2;
88 } else if (!IS_FWI2_CAPABLE(ha)) {
5b91490e
AV
89 /*
90 * Except for earlier ISPs where the timeout is seeded from the
91 * initialization control block.
92 */
93 tmo = ha->login_timeout;
94 }
95 return tmo;
96}
ac280b67 97
1956eee5
BVA
98static void qla24xx_abort_iocb_timeout(void *data)
99{
100 srb_t *sp = data;
101 struct srb_iocb *abt = &sp->u.iocb_cmd;
0c6df590
QT
102 struct qla_qpair *qpair = sp->qpair;
103 u32 handle;
104 unsigned long flags;
105
106 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
107 for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
108 /* removing the abort */
109 if (qpair->req->outstanding_cmds[handle] == sp) {
110 qpair->req->outstanding_cmds[handle] = NULL;
111 break;
112 }
113 }
114 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1956eee5
BVA
115
116 abt->u.abt.comp_status = CS_TIMEOUT;
0c6df590 117 sp->done(sp, QLA_OS_TIMER_EXPIRED);
1956eee5
BVA
118}
119
120static void qla24xx_abort_sp_done(void *ptr, int res)
121{
122 srb_t *sp = ptr;
123 struct srb_iocb *abt = &sp->u.iocb_cmd;
124
0c6df590
QT
125 if ((res == QLA_OS_TIMER_EXPIRED) ||
126 del_timer(&sp->u.iocb_cmd.timer)) {
1956eee5
BVA
127 if (sp->flags & SRB_WAKEUP_ON_COMP)
128 complete(&abt->u.abt.comp);
129 else
130 sp->free(sp);
131 }
132}
133
134static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
135{
136 scsi_qla_host_t *vha = cmd_sp->vha;
137 struct srb_iocb *abt_iocb;
138 srb_t *sp;
139 int rval = QLA_FUNCTION_FAILED;
140
141 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
142 GFP_ATOMIC);
143 if (!sp)
144 goto done;
145
146 abt_iocb = &sp->u.iocb_cmd;
147 sp->type = SRB_ABT_CMD;
148 sp->name = "abort";
149 sp->qpair = cmd_sp->qpair;
150 if (wait)
151 sp->flags = SRB_WAKEUP_ON_COMP;
152
153 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
154 init_completion(&abt_iocb->u.abt.comp);
155 /* FW can send 2 x ABTS's timeout/20s */
156 qla2x00_init_timer(sp, 42);
157
158 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
159 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
160
161 sp->done = qla24xx_abort_sp_done;
162
163 ql_dbg(ql_dbg_async, vha, 0x507c,
164 "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
165 cmd_sp->type);
166
167 rval = qla2x00_start_sp(sp);
168 if (rval != QLA_SUCCESS)
169 goto done_free_sp;
170
171 if (wait) {
172 wait_for_completion(&abt_iocb->u.abt.comp);
173 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
174 QLA_SUCCESS : QLA_FUNCTION_FAILED;
175 } else {
176 goto done;
177 }
178
179done_free_sp:
180 sp->free(sp);
181done:
182 return rval;
183}
184
726b8548 185void
9ba56b95 186qla2x00_async_iocb_timeout(void *data)
ac280b67 187{
25ff6af1 188 srb_t *sp = data;
ac280b67 189 fc_port_t *fcport = sp->fcport;
726b8548 190 struct srb_iocb *lio = &sp->u.iocb_cmd;
f6145e86
QT
191 int rc, h;
192 unsigned long flags;
ac280b67 193
5c25d451
QT
194 if (fcport) {
195 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
196 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
197 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
198
6d674927 199 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
5c25d451
QT
200 } else {
201 pr_info("Async-%s timeout - hdl=%x.\n",
202 sp->name, sp->handle);
203 }
726b8548
QT
204
205 switch (sp->type) {
206 case SRB_LOGIN_CMD:
f6145e86
QT
207 rc = qla24xx_async_abort_cmd(sp, false);
208 if (rc) {
209 /* Retry as needed. */
210 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
211 lio->u.logio.data[1] =
212 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
213 QLA_LOGIO_LOGIN_RETRIED : 0;
214 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
215 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
216 h++) {
217 if (sp->qpair->req->outstanding_cmds[h] ==
218 sp) {
219 sp->qpair->req->outstanding_cmds[h] =
220 NULL;
221 break;
222 }
223 }
224 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
225 sp->done(sp, QLA_FUNCTION_TIMEOUT);
226 }
726b8548
QT
227 break;
228 case SRB_LOGOUT_CMD:
726b8548
QT
229 case SRB_CT_PTHRU_CMD:
230 case SRB_MB_IOCB:
231 case SRB_NACK_PLOGI:
232 case SRB_NACK_PRLI:
233 case SRB_NACK_LOGO:
2853192e 234 case SRB_CTRL_VP:
f6145e86
QT
235 rc = qla24xx_async_abort_cmd(sp, false);
236 if (rc) {
237 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
238 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
239 h++) {
240 if (sp->qpair->req->outstanding_cmds[h] ==
241 sp) {
242 sp->qpair->req->outstanding_cmds[h] =
243 NULL;
244 break;
245 }
246 }
247 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
248 sp->done(sp, QLA_FUNCTION_TIMEOUT);
249 }
726b8548 250 break;
6ac52608 251 }
ac280b67
AV
252}
253
99b0bec7 254static void
25ff6af1 255qla2x00_async_login_sp_done(void *ptr, int res)
99b0bec7 256{
25ff6af1
JC
257 srb_t *sp = ptr;
258 struct scsi_qla_host *vha = sp->vha;
9ba56b95 259 struct srb_iocb *lio = &sp->u.iocb_cmd;
726b8548 260 struct event_arg ea;
9ba56b95 261
83548fe2 262 ql_dbg(ql_dbg_disc, vha, 0x20dd,
25ff6af1 263 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
726b8548 264
6d674927
QT
265 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
266
726b8548
QT
267 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
268 memset(&ea, 0, sizeof(ea));
269 ea.event = FCME_PLOGI_DONE;
270 ea.fcport = sp->fcport;
271 ea.data[0] = lio->u.logio.data[0];
272 ea.data[1] = lio->u.logio.data[1];
273 ea.iop[0] = lio->u.logio.iop[0];
274 ea.iop[1] = lio->u.logio.iop[1];
275 ea.sp = sp;
276 qla2x00_fcport_event_handler(vha, &ea);
277 }
9ba56b95 278
25ff6af1 279 sp->free(sp);
99b0bec7
AV
280}
281
48acad09
QT
282static inline bool
283fcport_is_smaller(fc_port_t *fcport)
284{
285 if (wwn_to_u64(fcport->port_name) <
286 wwn_to_u64(fcport->vha->port_name))
287 return true;
288 else
289 return false;
290}
291
292static inline bool
293fcport_is_bigger(fc_port_t *fcport)
294{
295 return !fcport_is_smaller(fcport);
296}
297
ac280b67
AV
298int
299qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
300 uint16_t *data)
301{
ac280b67 302 srb_t *sp;
4916392b 303 struct srb_iocb *lio;
726b8548
QT
304 int rval = QLA_FUNCTION_FAILED;
305
8b5292bc
QT
306 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
307 fcport->loop_id == FC_NO_LOOP_ID) {
308 ql_log(ql_log_warn, vha, 0xffff,
309 "%s: %8phC - not sending command.\n",
310 __func__, fcport->port_name);
311 return rval;
312 }
726b8548 313
9ba56b95 314 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
ac280b67
AV
315 if (!sp)
316 goto done;
317
726b8548
QT
318 fcport->flags |= FCF_ASYNC_SENT;
319 fcport->logout_completed = 0;
320
a4239945 321 fcport->disc_state = DSC_LOGIN_PEND;
9ba56b95
GM
322 sp->type = SRB_LOGIN_CMD;
323 sp->name = "login";
a4239945
QT
324 sp->gen1 = fcport->rscn_gen;
325 sp->gen2 = fcport->login_gen;
9ba56b95
GM
326
327 lio = &sp->u.iocb_cmd;
3822263e 328 lio->timeout = qla2x00_async_iocb_timeout;
e74e7d95
BH
329 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
330
9ba56b95 331 sp->done = qla2x00_async_login_sp_done;
835aa4f2 332 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
48acad09 333 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
835aa4f2 334 else
48acad09 335 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
a5d42f4c 336
835aa4f2
GM
337 if (fcport->fc4f_nvme)
338 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
a5d42f4c 339
9fe278f4
GM
340 ql_dbg(ql_dbg_disc, vha, 0x2072,
341 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
342 "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
343 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
344 fcport->login_retry);
345
ac280b67 346 rval = qla2x00_start_sp(sp);
080c9517 347 if (rval != QLA_SUCCESS) {
080c9517
CD
348 fcport->flags |= FCF_LOGIN_NEEDED;
349 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
ac280b67 350 goto done_free_sp;
080c9517 351 }
ac280b67 352
ac280b67
AV
353 return rval;
354
355done_free_sp:
25ff6af1 356 sp->free(sp);
726b8548 357 fcport->flags &= ~FCF_ASYNC_SENT;
3dbec59b 358done:
fa83e658 359 fcport->flags &= ~FCF_ASYNC_ACTIVE;
ac280b67
AV
360 return rval;
361}
362
99b0bec7 363static void
25ff6af1 364qla2x00_async_logout_sp_done(void *ptr, int res)
99b0bec7 365{
25ff6af1 366 srb_t *sp = ptr;
9ba56b95 367
6d674927 368 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
eaf75d18
QT
369 sp->fcport->login_gen++;
370 qlt_logo_completion_handler(sp->fcport, res);
25ff6af1 371 sp->free(sp);
99b0bec7
AV
372}
373
ac280b67
AV
374int
375qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
376{
ac280b67 377 srb_t *sp;
4916392b 378 struct srb_iocb *lio;
3dbec59b
QT
379 int rval = QLA_FUNCTION_FAILED;
380
726b8548 381 fcport->flags |= FCF_ASYNC_SENT;
9ba56b95 382 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
ac280b67
AV
383 if (!sp)
384 goto done;
385
9ba56b95
GM
386 sp->type = SRB_LOGOUT_CMD;
387 sp->name = "logout";
9ba56b95
GM
388
389 lio = &sp->u.iocb_cmd;
3822263e 390 lio->timeout = qla2x00_async_iocb_timeout;
e74e7d95
BH
391 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
392
9ba56b95 393 sp->done = qla2x00_async_logout_sp_done;
ac280b67 394
7c3df132 395 ql_dbg(ql_dbg_disc, vha, 0x2070,
726b8548 396 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
cfb0919c 397 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
726b8548
QT
398 fcport->d_id.b.area, fcport->d_id.b.al_pa,
399 fcport->port_name);
9fe278f4
GM
400
401 rval = qla2x00_start_sp(sp);
402 if (rval != QLA_SUCCESS)
403 goto done_free_sp;
ac280b67
AV
404 return rval;
405
406done_free_sp:
25ff6af1 407 sp->free(sp);
ac280b67 408done:
fa83e658 409 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
ac280b67
AV
410 return rval;
411}
11aea16a
QT
412
413void
414qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
415 uint16_t *data)
416{
fa83e658 417 fcport->flags &= ~FCF_ASYNC_ACTIVE;
11aea16a
QT
418 /* Don't re-login in target mode */
419 if (!fcport->tgt_session)
420 qla2x00_mark_device_lost(vha, fcport, 1, 0);
421 qlt_logo_completion_handler(fcport, data[0]);
422}
423
424static void
425qla2x00_async_prlo_sp_done(void *s, int res)
426{
427 srb_t *sp = (srb_t *)s;
428 struct srb_iocb *lio = &sp->u.iocb_cmd;
429 struct scsi_qla_host *vha = sp->vha;
430
fa83e658 431 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
11aea16a
QT
432 if (!test_bit(UNLOADING, &vha->dpc_flags))
433 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
434 lio->u.logio.data);
435 sp->free(sp);
436}
437
438int
439qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
440{
441 srb_t *sp;
442 struct srb_iocb *lio;
443 int rval;
444
445 rval = QLA_FUNCTION_FAILED;
446 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
447 if (!sp)
448 goto done;
449
450 sp->type = SRB_PRLO_CMD;
451 sp->name = "prlo";
11aea16a
QT
452
453 lio = &sp->u.iocb_cmd;
454 lio->timeout = qla2x00_async_iocb_timeout;
e74e7d95
BH
455 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
456
11aea16a 457 sp->done = qla2x00_async_prlo_sp_done;
11aea16a
QT
458
459 ql_dbg(ql_dbg_disc, vha, 0x2070,
460 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
461 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
462 fcport->d_id.b.area, fcport->d_id.b.al_pa);
f233e8c0
BK
463
464 rval = qla2x00_start_sp(sp);
465 if (rval != QLA_SUCCESS)
466 goto done_free_sp;
467
11aea16a
QT
468 return rval;
469
470done_free_sp:
471 sp->free(sp);
472done:
fa83e658 473 fcport->flags &= ~FCF_ASYNC_ACTIVE;
11aea16a
QT
474 return rval;
475}
476
f13515ac
QT
477static
478void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
479{
0616e965
QT
480 struct fc_port *fcport = ea->fcport;
481
482 ql_dbg(ql_dbg_disc, vha, 0x20d2,
483 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
484 __func__, fcport->port_name, fcport->disc_state,
485 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
486 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
487
488 if (ea->data[0] != MBS_COMMAND_COMPLETE) {
a4239945
QT
489 ql_dbg(ql_dbg_disc, vha, 0x2066,
490 "%s %8phC: adisc fail: post delete\n",
491 __func__, ea->fcport->port_name);
2d3fdbeb
QT
492 /* deleted = 0 & logout_on_delete = force fw cleanup */
493 fcport->deleted = 0;
494 fcport->logout_on_delete = 1;
94cff6e1 495 qlt_schedule_sess_for_deletion(ea->fcport);
a4239945
QT
496 return;
497 }
a4239945
QT
498
499 if (ea->fcport->disc_state == DSC_DELETE_PEND)
500 return;
501
502 if (ea->sp->gen2 != ea->fcport->login_gen) {
503 /* target side must have changed it. */
504 ql_dbg(ql_dbg_disc, vha, 0x20d3,
0616e965
QT
505 "%s %8phC generation changed\n",
506 __func__, ea->fcport->port_name);
a4239945
QT
507 return;
508 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
d4f7a16a 509 qla_rscn_replay(fcport);
861d483d 510 qlt_schedule_sess_for_deletion(fcport);
a4239945
QT
511 return;
512 }
513
514 __qla24xx_handle_gpdb_event(vha, ea);
f13515ac 515}
ac280b67 516
8f9a2148 517static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
8777e431
QT
518{
519 struct qla_work_evt *e;
520
521 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
522 if (!e)
523 return QLA_FUNCTION_FAILED;
524
525 e->u.fcport.fcport = fcport;
526 fcport->flags |= FCF_ASYNC_ACTIVE;
527 return qla2x00_post_work(vha, e);
528}
529
5ff1d584 530static void
25ff6af1 531qla2x00_async_adisc_sp_done(void *ptr, int res)
5ff1d584 532{
25ff6af1
JC
533 srb_t *sp = ptr;
534 struct scsi_qla_host *vha = sp->vha;
f13515ac 535 struct event_arg ea;
0616e965 536 struct srb_iocb *lio = &sp->u.iocb_cmd;
f13515ac
QT
537
538 ql_dbg(ql_dbg_disc, vha, 0x2066,
539 "Async done-%s res %x %8phC\n",
540 sp->name, res, sp->fcport->port_name);
541
15b6c3c9 542 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
fa83e658 543
f13515ac
QT
544 memset(&ea, 0, sizeof(ea));
545 ea.event = FCME_ADISC_DONE;
546 ea.rc = res;
0616e965
QT
547 ea.data[0] = lio->u.logio.data[0];
548 ea.data[1] = lio->u.logio.data[1];
549 ea.iop[0] = lio->u.logio.iop[0];
550 ea.iop[1] = lio->u.logio.iop[1];
f13515ac
QT
551 ea.fcport = sp->fcport;
552 ea.sp = sp;
553
554 qla2x00_fcport_event_handler(vha, &ea);
9ba56b95 555
25ff6af1 556 sp->free(sp);
5ff1d584
AV
557}
558
559int
560qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
561 uint16_t *data)
562{
5ff1d584 563 srb_t *sp;
4916392b 564 struct srb_iocb *lio;
192c4e9b
QT
565 int rval = QLA_FUNCTION_FAILED;
566
567 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
568 return rval;
5ff1d584 569
726b8548 570 fcport->flags |= FCF_ASYNC_SENT;
9ba56b95 571 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
5ff1d584
AV
572 if (!sp)
573 goto done;
574
9ba56b95
GM
575 sp->type = SRB_ADISC_CMD;
576 sp->name = "adisc";
9ba56b95
GM
577
578 lio = &sp->u.iocb_cmd;
3822263e 579 lio->timeout = qla2x00_async_iocb_timeout;
8777e431
QT
580 sp->gen1 = fcport->rscn_gen;
581 sp->gen2 = fcport->login_gen;
e74e7d95
BH
582 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
583
9ba56b95 584 sp->done = qla2x00_async_adisc_sp_done;
5ff1d584 585 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
4916392b 586 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
5ff1d584 587
7c3df132 588 ql_dbg(ql_dbg_disc, vha, 0x206f,
f13515ac
QT
589 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
590 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
9fe278f4
GM
591
592 rval = qla2x00_start_sp(sp);
593 if (rval != QLA_SUCCESS)
594 goto done_free_sp;
595
5ff1d584
AV
596 return rval;
597
598done_free_sp:
25ff6af1 599 sp->free(sp);
5ff1d584 600done:
fa83e658 601 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
f13515ac 602 qla2x00_post_async_adisc_work(vha, fcport, data);
726b8548
QT
603 return rval;
604}
605
0c6660b2
BVA
606static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
607{
608 struct qla_hw_data *ha = vha->hw;
609
610 if (IS_FWI2_CAPABLE(ha))
611 return loop_id > NPH_LAST_HANDLE;
612
613 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
614 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
615}
616
94f5b916
BVA
617/**
618 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
619 * @vha: adapter state pointer.
620 * @dev: port structure pointer.
621 *
622 * Returns:
623 * qla2x00 local function return status code.
624 *
625 * Context:
626 * Kernel context.
627 */
628static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
629{
630 int rval;
631 struct qla_hw_data *ha = vha->hw;
632 unsigned long flags = 0;
633
634 rval = QLA_SUCCESS;
635
636 spin_lock_irqsave(&ha->vport_slock, flags);
637
638 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
639 if (dev->loop_id >= LOOPID_MAP_SIZE ||
640 qla2x00_is_reserved_id(vha, dev->loop_id)) {
641 dev->loop_id = FC_NO_LOOP_ID;
642 rval = QLA_FUNCTION_FAILED;
643 } else {
644 set_bit(dev->loop_id, ha->loop_id_map);
645 }
646 spin_unlock_irqrestore(&ha->vport_slock, flags);
647
648 if (rval == QLA_SUCCESS)
649 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
650 "Assigning new loopid=%x, portid=%x.\n",
651 dev->loop_id, dev->d_id.b24);
652 else
653 ql_log(ql_log_warn, dev->vha, 0x2087,
654 "No loop_id's available, portid=%x.\n",
655 dev->d_id.b24);
656
657 return rval;
658}
659
ef1eb688
BVA
660void qla2x00_clear_loop_id(fc_port_t *fcport)
661{
662 struct qla_hw_data *ha = fcport->vha->hw;
663
664 if (fcport->loop_id == FC_NO_LOOP_ID ||
665 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
666 return;
667
668 clear_bit(fcport->loop_id, ha->loop_id_map);
669 fcport->loop_id = FC_NO_LOOP_ID;
670}
671
726b8548
QT
672static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
673 struct event_arg *ea)
674{
675 fc_port_t *fcport, *conflict_fcport;
676 struct get_name_list_extended *e;
677 u16 i, n, found = 0, loop_id;
678 port_id_t id;
679 u64 wwn;
a4239945
QT
680 u16 data[2];
681 u8 current_login_state;
726b8548
QT
682
683 fcport = ea->fcport;
f352eeb7
QT
684 ql_dbg(ql_dbg_disc, vha, 0xffff,
685 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
686 __func__, fcport->port_name, fcport->disc_state,
687 fcport->fw_login_state, ea->rc,
688 fcport->login_gen, fcport->last_login_gen,
689 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
726b8548 690
a4239945
QT
691 if (fcport->disc_state == DSC_DELETE_PEND)
692 return;
693
726b8548
QT
694 if (ea->rc) { /* rval */
695 if (fcport->login_retry == 0) {
83548fe2
QT
696 ql_dbg(ql_dbg_disc, vha, 0x20de,
697 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
698 fcport->port_name, fcport->login_retry);
726b8548
QT
699 }
700 return;
701 }
702
703 if (fcport->last_rscn_gen != fcport->rscn_gen) {
d4f7a16a 704 qla_rscn_replay(fcport);
861d483d 705 qlt_schedule_sess_for_deletion(fcport);
726b8548
QT
706 return;
707 } else if (fcport->last_login_gen != fcport->login_gen) {
83548fe2 708 ql_dbg(ql_dbg_disc, vha, 0x20e0,
f352eeb7
QT
709 "%s %8phC login gen changed\n",
710 __func__, fcport->port_name);
726b8548
QT
711 return;
712 }
713
714 n = ea->data[0] / sizeof(struct get_name_list_extended);
715
83548fe2 716 ql_dbg(ql_dbg_disc, vha, 0x20e1,
726b8548
QT
717 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
718 __func__, __LINE__, fcport->port_name, n,
719 fcport->d_id.b.domain, fcport->d_id.b.area,
720 fcport->d_id.b.al_pa, fcport->loop_id);
721
722 for (i = 0; i < n; i++) {
723 e = &vha->gnl.l[i];
724 wwn = wwn_to_u64(e->port_name);
48acad09
QT
725 id.b.domain = e->port_id[2];
726 id.b.area = e->port_id[1];
727 id.b.al_pa = e->port_id[0];
728 id.b.rsvd_1 = 0;
726b8548
QT
729
730 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
731 continue;
732
48acad09
QT
733 if (IS_SW_RESV_ADDR(id))
734 continue;
735
726b8548 736 found = 1;
726b8548
QT
737
738 loop_id = le16_to_cpu(e->nport_handle);
739 loop_id = (loop_id & 0x7fff);
8777e431
QT
740 if (fcport->fc4f_nvme)
741 current_login_state = e->current_login_state >> 4;
742 else
743 current_login_state = e->current_login_state & 0xf;
744
726b8548 745
83548fe2 746 ql_dbg(ql_dbg_disc, vha, 0x20e2,
8777e431 747 "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
83548fe2
QT
748 __func__, fcport->port_name,
749 e->current_login_state, fcport->fw_login_state,
8777e431 750 fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
83548fe2
QT
751 fcport->d_id.b.domain, fcport->d_id.b.area,
752 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
726b8548 753
48acad09
QT
754 switch (fcport->disc_state) {
755 case DSC_DELETE_PEND:
756 case DSC_DELETED:
757 break;
758 default:
8777e431
QT
759 if ((id.b24 != fcport->d_id.b24 &&
760 fcport->d_id.b24) ||
761 (fcport->loop_id != FC_NO_LOOP_ID &&
762 fcport->loop_id != loop_id)) {
763 ql_dbg(ql_dbg_disc, vha, 0x20e3,
764 "%s %d %8phC post del sess\n",
765 __func__, __LINE__, fcport->port_name);
48acad09
QT
766 qlt_schedule_sess_for_deletion(fcport);
767 return;
768 }
769 break;
726b8548
QT
770 }
771
772 fcport->loop_id = loop_id;
773
774 wwn = wwn_to_u64(fcport->port_name);
775 qlt_find_sess_invalidate_other(vha, wwn,
776 id, loop_id, &conflict_fcport);
777
778 if (conflict_fcport) {
779 /*
780 * Another share fcport share the same loop_id &
781 * nport id. Conflict fcport needs to finish
782 * cleanup before this fcport can proceed to login.
783 */
784 conflict_fcport->conflict = fcport;
785 fcport->login_pause = 1;
786 }
787
48acad09
QT
788 switch (vha->hw->current_topology) {
789 default:
790 switch (current_login_state) {
791 case DSC_LS_PRLI_COMP:
792 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
793 vha, 0x20e4, "%s %d %8phC post gpdb\n",
794 __func__, __LINE__, fcport->port_name);
a4239945 795
48acad09
QT
796 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
797 fcport->port_type = FCT_INITIATOR;
798 else
799 fcport->port_type = FCT_TARGET;
800 data[0] = data[1] = 0;
801 qla2x00_post_async_adisc_work(vha, fcport,
802 data);
803 break;
804 case DSC_LS_PORT_UNAVAIL:
805 default:
ec322937
HM
806 if (fcport->loop_id == FC_NO_LOOP_ID) {
807 qla2x00_find_new_loop_id(vha, fcport);
808 fcport->fw_login_state =
809 DSC_LS_PORT_UNAVAIL;
810 }
811 ql_dbg(ql_dbg_disc, vha, 0x20e5,
812 "%s %d %8phC\n", __func__, __LINE__,
813 fcport->port_name);
48acad09
QT
814 qla24xx_fcport_handle_login(vha, fcport);
815 break;
726b8548 816 }
726b8548 817 break;
48acad09 818 case ISP_CFG_N:
8777e431
QT
819 fcport->fw_login_state = current_login_state;
820 fcport->d_id = id;
48acad09
QT
821 switch (current_login_state) {
822 case DSC_LS_PRLI_COMP:
823 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
824 fcport->port_type = FCT_INITIATOR;
825 else
826 fcport->port_type = FCT_TARGET;
827
828 data[0] = data[1] = 0;
829 qla2x00_post_async_adisc_work(vha, fcport,
830 data);
831 break;
832 case DSC_LS_PLOGI_COMP:
833 if (fcport_is_bigger(fcport)) {
834 /* local adapter is smaller */
835 if (fcport->loop_id != FC_NO_LOOP_ID)
836 qla2x00_clear_loop_id(fcport);
837
838 fcport->loop_id = loop_id;
839 qla24xx_fcport_handle_login(vha,
840 fcport);
841 break;
842 }
50435d42 843 /* fall through */
48acad09
QT
844 default:
845 if (fcport_is_smaller(fcport)) {
846 /* local adapter is bigger */
847 if (fcport->loop_id != FC_NO_LOOP_ID)
848 qla2x00_clear_loop_id(fcport);
849
850 fcport->loop_id = loop_id;
851 qla24xx_fcport_handle_login(vha,
852 fcport);
853 }
854 break;
855 }
856 break;
857 } /* switch (ha->current_topology) */
726b8548
QT
858 }
859
860 if (!found) {
48acad09
QT
861 switch (vha->hw->current_topology) {
862 case ISP_CFG_F:
863 case ISP_CFG_FL:
864 for (i = 0; i < n; i++) {
865 e = &vha->gnl.l[i];
866 id.b.domain = e->port_id[0];
867 id.b.area = e->port_id[1];
868 id.b.al_pa = e->port_id[2];
869 id.b.rsvd_1 = 0;
870 loop_id = le16_to_cpu(e->nport_handle);
871
872 if (fcport->d_id.b24 == id.b24) {
873 conflict_fcport =
874 qla2x00_find_fcport_by_wwpn(vha,
875 e->port_name, 0);
72f02ba6
LT
876 if (conflict_fcport) {
877 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
878 vha, 0x20e5,
879 "%s %d %8phC post del sess\n",
880 __func__, __LINE__,
881 conflict_fcport->port_name);
882 qlt_schedule_sess_for_deletion
883 (conflict_fcport);
884 }
36eb8ff6 885 }
48acad09
QT
886 /*
887 * FW already picked this loop id for
888 * another fcport
889 */
890 if (fcport->loop_id == loop_id)
891 fcport->loop_id = FC_NO_LOOP_ID;
726b8548 892 }
48acad09
QT
893 qla24xx_fcport_handle_login(vha, fcport);
894 break;
895 case ISP_CFG_N:
8777e431
QT
896 fcport->disc_state = DSC_DELETED;
897 if (time_after_eq(jiffies, fcport->dm_login_expire)) {
898 if (fcport->n2n_link_reset_cnt < 2) {
899 fcport->n2n_link_reset_cnt++;
900 /*
901 * remote port is not sending PLOGI.
902 * Reset link to kick start his state
903 * machine
904 */
905 set_bit(N2N_LINK_RESET,
906 &vha->dpc_flags);
907 } else {
908 if (fcport->n2n_chip_reset < 1) {
909 ql_log(ql_log_info, vha, 0x705d,
910 "Chip reset to bring laser down");
911 set_bit(ISP_ABORT_NEEDED,
912 &vha->dpc_flags);
913 fcport->n2n_chip_reset++;
914 } else {
915 ql_log(ql_log_info, vha, 0x705d,
916 "Remote port %8ph is not coming back\n",
917 fcport->port_name);
918 fcport->scan_state = 0;
919 }
920 }
921 qla2xxx_wake_dpc(vha);
922 } else {
923 /*
924 * report port suppose to do PLOGI. Give him
925 * more time. FW will catch it.
926 */
927 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
928 }
48acad09
QT
929 break;
930 default:
931 break;
726b8548 932 }
726b8548
QT
933 }
934} /* gnl_event */
935
936static void
25ff6af1 937qla24xx_async_gnl_sp_done(void *s, int res)
726b8548 938{
25ff6af1
JC
939 struct srb *sp = s;
940 struct scsi_qla_host *vha = sp->vha;
726b8548
QT
941 unsigned long flags;
942 struct fc_port *fcport = NULL, *tf;
943 u16 i, n = 0, loop_id;
944 struct event_arg ea;
945 struct get_name_list_extended *e;
946 u64 wwn;
947 struct list_head h;
a4239945 948 bool found = false;
726b8548 949
83548fe2 950 ql_dbg(ql_dbg_disc, vha, 0x20e7,
726b8548
QT
951 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
952 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
953 sp->u.iocb_cmd.u.mbx.in_mb[2]);
954
ef801f07
HM
955 if (res == QLA_FUNCTION_TIMEOUT)
956 return;
957
0aca7784 958 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
726b8548
QT
959 memset(&ea, 0, sizeof(ea));
960 ea.sp = sp;
961 ea.rc = res;
962 ea.event = FCME_GNL_DONE;
963
964 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
965 sizeof(struct get_name_list_extended)) {
966 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
967 sizeof(struct get_name_list_extended);
968 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
969 }
970
971 for (i = 0; i < n; i++) {
972 e = &vha->gnl.l[i];
973 loop_id = le16_to_cpu(e->nport_handle);
974 /* mask out reserve bit */
975 loop_id = (loop_id & 0x7fff);
976 set_bit(loop_id, vha->hw->loop_id_map);
977 wwn = wwn_to_u64(e->port_name);
978
83548fe2 979 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
726b8548
QT
980 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
981 __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
982 e->port_id[0], e->current_login_state, e->last_login_state,
983 (loop_id & 0x7fff));
984 }
985
0aca7784 986 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
726b8548
QT
987
988 INIT_LIST_HEAD(&h);
989 fcport = tf = NULL;
990 if (!list_empty(&vha->gnl.fcports))
991 list_splice_init(&vha->gnl.fcports, &h);
0aca7784 992 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
726b8548
QT
993
994 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
995 list_del_init(&fcport->gnl_entry);
0aca7784 996 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6d674927 997 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0aca7784 998 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
726b8548
QT
999 ea.fcport = fcport;
1000
1001 qla2x00_fcport_event_handler(vha, &ea);
1002 }
1003
a4239945
QT
1004 /* create new fcport if fw has knowledge of new sessions */
1005 for (i = 0; i < n; i++) {
1006 port_id_t id;
1007 u64 wwnn;
1008
1009 e = &vha->gnl.l[i];
1010 wwn = wwn_to_u64(e->port_name);
1011
1012 found = false;
1013 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1014 if (!memcmp((u8 *)&wwn, fcport->port_name,
1015 WWN_SIZE)) {
1016 found = true;
1017 break;
1018 }
1019 }
1020
cf055fb0 1021 id.b.domain = e->port_id[2];
a4239945 1022 id.b.area = e->port_id[1];
cf055fb0 1023 id.b.al_pa = e->port_id[0];
a4239945
QT
1024 id.b.rsvd_1 = 0;
1025
1026 if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
1027 ql_dbg(ql_dbg_disc, vha, 0x2065,
cf055fb0
QT
1028 "%s %d %8phC %06x post new sess\n",
1029 __func__, __LINE__, (u8 *)&wwn, id.b24);
a4239945
QT
1030 wwnn = wwn_to_u64(e->node_name);
1031 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1032 (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
1033 }
1034 }
1035
0aca7784
QT
1036 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1037 vha->gnl.sent = 0;
726b8548
QT
1038 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1039
25ff6af1 1040 sp->free(sp);
726b8548
QT
1041}
1042
1043int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1044{
1045 srb_t *sp;
1046 struct srb_iocb *mbx;
1047 int rval = QLA_FUNCTION_FAILED;
1048 unsigned long flags;
1049 u16 *mb;
1050
3dbec59b
QT
1051 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1052 return rval;
726b8548 1053
83548fe2 1054 ql_dbg(ql_dbg_disc, vha, 0x20d9,
726b8548
QT
1055 "Async-gnlist WWPN %8phC \n", fcport->port_name);
1056
0aca7784
QT
1057 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1058 fcport->flags |= FCF_ASYNC_SENT;
726b8548
QT
1059 fcport->disc_state = DSC_GNL;
1060 fcport->last_rscn_gen = fcport->rscn_gen;
1061 fcport->last_login_gen = fcport->login_gen;
1062
1063 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
0aca7784
QT
1064 if (vha->gnl.sent) {
1065 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1066 return QLA_SUCCESS;
1067 }
1068 vha->gnl.sent = 1;
1069 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
726b8548
QT
1070
1071 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1072 if (!sp)
1073 goto done;
3dbec59b 1074
726b8548
QT
1075 sp->type = SRB_MB_IOCB;
1076 sp->name = "gnlist";
1077 sp->gen1 = fcport->rscn_gen;
1078 sp->gen2 = fcport->login_gen;
1079
e74e7d95
BH
1080 mbx = &sp->u.iocb_cmd;
1081 mbx->timeout = qla2x00_async_iocb_timeout;
726b8548
QT
1082 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
1083
1084 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1085 mb[0] = MBC_PORT_NODE_NAME_LIST;
1086 mb[1] = BIT_2 | BIT_3;
1087 mb[2] = MSW(vha->gnl.ldma);
1088 mb[3] = LSW(vha->gnl.ldma);
1089 mb[6] = MSW(MSD(vha->gnl.ldma));
1090 mb[7] = LSW(MSD(vha->gnl.ldma));
1091 mb[8] = vha->gnl.size;
1092 mb[9] = vha->vp_idx;
1093
726b8548
QT
1094 sp->done = qla24xx_async_gnl_sp_done;
1095
83548fe2
QT
1096 ql_dbg(ql_dbg_disc, vha, 0x20da,
1097 "Async-%s - OUT WWPN %8phC hndl %x\n",
1098 sp->name, fcport->port_name, sp->handle);
726b8548 1099
f233e8c0
BK
1100 rval = qla2x00_start_sp(sp);
1101 if (rval != QLA_SUCCESS)
1102 goto done_free_sp;
1103
726b8548
QT
1104 return rval;
1105
1106done_free_sp:
25ff6af1 1107 sp->free(sp);
726b8548 1108 fcport->flags &= ~FCF_ASYNC_SENT;
3dbec59b 1109done:
726b8548
QT
1110 return rval;
1111}
1112
1113int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1114{
1115 struct qla_work_evt *e;
1116
1117 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1118 if (!e)
1119 return QLA_FUNCTION_FAILED;
1120
1121 e->u.fcport.fcport = fcport;
6d674927 1122 fcport->flags |= FCF_ASYNC_ACTIVE;
726b8548
QT
1123 return qla2x00_post_work(vha, e);
1124}
1125
1126static
25ff6af1 1127void qla24xx_async_gpdb_sp_done(void *s, int res)
726b8548 1128{
25ff6af1
JC
1129 struct srb *sp = s;
1130 struct scsi_qla_host *vha = sp->vha;
726b8548 1131 struct qla_hw_data *ha = vha->hw;
726b8548
QT
1132 fc_port_t *fcport = sp->fcport;
1133 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
726b8548
QT
1134 struct event_arg ea;
1135
83548fe2 1136 ql_dbg(ql_dbg_disc, vha, 0x20db,
726b8548
QT
1137 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
1138 sp->name, res, fcport->port_name, mb[1], mb[2]);
1139
bcc71cc3
GM
1140 if (res == QLA_FUNCTION_TIMEOUT) {
1141 dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1142 sp->u.iocb_cmd.u.mbx.in_dma);
1143 return;
1144 }
1145
ef801f07 1146 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
726b8548
QT
1147 memset(&ea, 0, sizeof(ea));
1148 ea.event = FCME_GPDB_DONE;
726b8548
QT
1149 ea.fcport = fcport;
1150 ea.sp = sp;
1151
1152 qla2x00_fcport_event_handler(vha, &ea);
1153
1154 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1155 sp->u.iocb_cmd.u.mbx.in_dma);
1156
25ff6af1 1157 sp->free(sp);
726b8548
QT
1158}
1159
a5d42f4c
DG
1160static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1161{
1162 struct qla_work_evt *e;
1163
1164 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1165 if (!e)
1166 return QLA_FUNCTION_FAILED;
1167
1168 e->u.fcport.fcport = fcport;
1169
1170 return qla2x00_post_work(vha, e);
1171}
1172
1173static void
1174qla2x00_async_prli_sp_done(void *ptr, int res)
1175{
1176 srb_t *sp = ptr;
1177 struct scsi_qla_host *vha = sp->vha;
1178 struct srb_iocb *lio = &sp->u.iocb_cmd;
1179 struct event_arg ea;
1180
1181 ql_dbg(ql_dbg_disc, vha, 0x2129,
1182 "%s %8phC res %d \n", __func__,
1183 sp->fcport->port_name, res);
1184
1185 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1186
1187 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1188 memset(&ea, 0, sizeof(ea));
1189 ea.event = FCME_PRLI_DONE;
1190 ea.fcport = sp->fcport;
1191 ea.data[0] = lio->u.logio.data[0];
1192 ea.data[1] = lio->u.logio.data[1];
1193 ea.iop[0] = lio->u.logio.iop[0];
1194 ea.iop[1] = lio->u.logio.iop[1];
1195 ea.sp = sp;
1196
1197 qla2x00_fcport_event_handler(vha, &ea);
1198 }
1199
1200 sp->free(sp);
1201}
1202
1203int
1204qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1205{
1206 srb_t *sp;
1207 struct srb_iocb *lio;
1208 int rval = QLA_FUNCTION_FAILED;
1209
1210 if (!vha->flags.online)
1211 return rval;
1212
1213 if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
a5d42f4c
DG
1214 fcport->fw_login_state == DSC_LS_PRLI_PEND)
1215 return rval;
1216
1217 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1218 if (!sp)
1219 return rval;
1220
1221 fcport->flags |= FCF_ASYNC_SENT;
1222 fcport->logout_completed = 0;
1223
1224 sp->type = SRB_PRLI_CMD;
1225 sp->name = "prli";
a5d42f4c
DG
1226
1227 lio = &sp->u.iocb_cmd;
1228 lio->timeout = qla2x00_async_iocb_timeout;
e74e7d95
BH
1229 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
1230
a5d42f4c
DG
1231 sp->done = qla2x00_async_prli_sp_done;
1232 lio->u.logio.flags = 0;
1233
1234 if (fcport->fc4f_nvme)
1235 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1236
f233e8c0
BK
1237 ql_dbg(ql_dbg_disc, vha, 0x211b,
1238 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
1239 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1240 fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
1241
a5d42f4c
DG
1242 rval = qla2x00_start_sp(sp);
1243 if (rval != QLA_SUCCESS) {
a5d42f4c
DG
1244 fcport->flags |= FCF_LOGIN_NEEDED;
1245 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1246 goto done_free_sp;
1247 }
1248
a5d42f4c
DG
1249 return rval;
1250
1251done_free_sp:
1252 sp->free(sp);
1253 fcport->flags &= ~FCF_ASYNC_SENT;
1254 return rval;
1255}
1256
a07fc0a4 1257int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
726b8548
QT
1258{
1259 struct qla_work_evt *e;
1260
1261 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1262 if (!e)
1263 return QLA_FUNCTION_FAILED;
1264
1265 e->u.fcport.fcport = fcport;
1266 e->u.fcport.opt = opt;
6d674927 1267 fcport->flags |= FCF_ASYNC_ACTIVE;
726b8548
QT
1268 return qla2x00_post_work(vha, e);
1269}
1270
1271int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1272{
1273 srb_t *sp;
1274 struct srb_iocb *mbx;
1275 int rval = QLA_FUNCTION_FAILED;
1276 u16 *mb;
1277 dma_addr_t pd_dma;
1278 struct port_database_24xx *pd;
1279 struct qla_hw_data *ha = vha->hw;
1280
8b5292bc
QT
1281 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
1282 fcport->loop_id == FC_NO_LOOP_ID) {
1283 ql_log(ql_log_warn, vha, 0xffff,
1284 "%s: %8phC - not sending command.\n",
1285 __func__, fcport->port_name);
3dbec59b 1286 return rval;
8b5292bc 1287 }
726b8548 1288
726b8548
QT
1289 fcport->disc_state = DSC_GPDB;
1290
1291 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1292 if (!sp)
1293 goto done;
1294
3dbec59b 1295 fcport->flags |= FCF_ASYNC_SENT;
e0824e69
JC
1296 sp->type = SRB_MB_IOCB;
1297 sp->name = "gpdb";
1298 sp->gen1 = fcport->rscn_gen;
1299 sp->gen2 = fcport->login_gen;
e74e7d95
BH
1300
1301 mbx = &sp->u.iocb_cmd;
1302 mbx->timeout = qla2x00_async_iocb_timeout;
e0824e69
JC
1303 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
1304
08eb7f45 1305 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
726b8548 1306 if (pd == NULL) {
83548fe2
QT
1307 ql_log(ql_log_warn, vha, 0xd043,
1308 "Failed to allocate port database structure.\n");
726b8548
QT
1309 goto done_free_sp;
1310 }
726b8548 1311
726b8548
QT
1312 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1313 mb[0] = MBC_GET_PORT_DATABASE;
1314 mb[1] = fcport->loop_id;
1315 mb[2] = MSW(pd_dma);
1316 mb[3] = LSW(pd_dma);
1317 mb[6] = MSW(MSD(pd_dma));
1318 mb[7] = LSW(MSD(pd_dma));
1319 mb[9] = vha->vp_idx;
1320 mb[10] = opt;
1321
726b8548
QT
1322 mbx->u.mbx.in = (void *)pd;
1323 mbx->u.mbx.in_dma = pd_dma;
1324
1325 sp->done = qla24xx_async_gpdb_sp_done;
1326
83548fe2
QT
1327 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1328 "Async-%s %8phC hndl %x opt %x\n",
1329 sp->name, fcport->port_name, sp->handle, opt);
726b8548 1330
9fe278f4
GM
1331 rval = qla2x00_start_sp(sp);
1332 if (rval != QLA_SUCCESS)
1333 goto done_free_sp;
726b8548
QT
1334 return rval;
1335
1336done_free_sp:
1337 if (pd)
1338 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1339
25ff6af1 1340 sp->free(sp);
726b8548 1341 fcport->flags &= ~FCF_ASYNC_SENT;
3dbec59b 1342done:
726b8548 1343 qla24xx_post_gpdb_work(vha, fcport, opt);
5ff1d584
AV
1344 return rval;
1345}
1346
726b8548 1347static
a4239945 1348void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
726b8548 1349{
726b8548
QT
1350 unsigned long flags;
1351
726b8548 1352 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
f13515ac 1353 ea->fcport->login_gen++;
726b8548
QT
1354 ea->fcport->deleted = 0;
1355 ea->fcport->logout_on_delete = 1;
1356
1357 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
1358 vha->fcport_count++;
1359 ea->fcport->login_succ = 1;
1360
0aca7784 1361 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
cd4ed6b4 1362 qla24xx_sched_upd_fcport(ea->fcport);
0aca7784 1363 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
414d9ff3
QT
1364 } else if (ea->fcport->login_succ) {
1365 /*
1366 * We have an existing session. A late RSCN delivery
1367 * must have triggered the session to be re-validate.
a4239945 1368 * Session is still valid.
414d9ff3 1369 */
5ef696aa
QT
1370 ql_dbg(ql_dbg_disc, vha, 0x20d6,
1371 "%s %d %8phC session revalidate success\n",
a4239945 1372 __func__, __LINE__, ea->fcport->port_name);
8a7eac2f 1373 ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
726b8548
QT
1374 }
1375 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
a4239945
QT
1376}
1377
1378static
1379void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1380{
a4239945
QT
1381 fc_port_t *fcport = ea->fcport;
1382 struct port_database_24xx *pd;
1383 struct srb *sp = ea->sp;
2b5b9647 1384 uint8_t ls;
a4239945
QT
1385
1386 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1387
1388 fcport->flags &= ~FCF_ASYNC_SENT;
1389
1390 ql_dbg(ql_dbg_disc, vha, 0x20d2,
8777e431
QT
1391 "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
1392 fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
1393 ea->rc);
a4239945
QT
1394
1395 if (fcport->disc_state == DSC_DELETE_PEND)
1396 return;
726b8548 1397
2b5b9647
DT
1398 if (fcport->fc4f_nvme)
1399 ls = pd->current_login_state >> 4;
1400 else
1401 ls = pd->current_login_state & 0xf;
1402
d4f7a16a
HM
1403 if (ea->sp->gen2 != fcport->login_gen) {
1404 /* target side must have changed it. */
1405
1406 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1407 "%s %8phC generation changed\n",
1408 __func__, fcport->port_name);
1409 return;
1410 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1411 qla_rscn_replay(fcport);
861d483d 1412 qlt_schedule_sess_for_deletion(fcport);
d4f7a16a
HM
1413 return;
1414 }
1415
2b5b9647 1416 switch (ls) {
a4239945
QT
1417 case PDS_PRLI_COMPLETE:
1418 __qla24xx_parse_gpdb(vha, fcport, pd);
1419 break;
1420 case PDS_PLOGI_PENDING:
1421 case PDS_PLOGI_COMPLETE:
1422 case PDS_PRLI_PENDING:
1423 case PDS_PRLI2_PENDING:
8fde6977
QT
1424 /* Set discovery state back to GNL to Relogin attempt */
1425 if (qla_dual_mode_enabled(vha) ||
1426 qla_ini_mode_enabled(vha)) {
1427 fcport->disc_state = DSC_GNL;
1428 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1429 }
a4239945
QT
1430 return;
1431 case PDS_LOGO_PENDING:
1432 case PDS_PORT_UNAVAILABLE:
1433 default:
1434 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1435 __func__, __LINE__, fcport->port_name);
d8630bb9 1436 qlt_schedule_sess_for_deletion(fcport);
a4239945
QT
1437 return;
1438 }
1439 __qla24xx_handle_gpdb_event(vha, ea);
1440} /* gpdb event */
9cd883f0
QT
1441
1442static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1443{
1444 u8 login = 0;
040036bb 1445 int rc;
9cd883f0
QT
1446
1447 if (qla_tgt_mode_enabled(vha))
1448 return;
1449
1450 if (qla_dual_mode_enabled(vha)) {
1451 if (N2N_TOPO(vha->hw)) {
1452 u64 mywwn, wwn;
1453
1454 mywwn = wwn_to_u64(vha->port_name);
1455 wwn = wwn_to_u64(fcport->port_name);
1456 if (mywwn > wwn)
1457 login = 1;
1458 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1459 && time_after_eq(jiffies,
1460 fcport->plogi_nack_done_deadline))
1461 login = 1;
1462 } else {
1463 login = 1;
1464 }
1465 } else {
1466 /* initiator mode */
1467 login = 1;
1468 }
1469
0754d5e0
QT
1470 if (login && fcport->login_retry) {
1471 fcport->login_retry--;
040036bb
QT
1472 if (fcport->loop_id == FC_NO_LOOP_ID) {
1473 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1474 rc = qla2x00_find_new_loop_id(vha, fcport);
1475 if (rc) {
1476 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1477 "%s %d %8phC post del sess - out of loopid\n",
1478 __func__, __LINE__, fcport->port_name);
1479 fcport->scan_state = 0;
94cff6e1 1480 qlt_schedule_sess_for_deletion(fcport);
040036bb
QT
1481 return;
1482 }
1483 }
9cd883f0
QT
1484 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1485 "%s %d %8phC post login\n",
1486 __func__, __LINE__, fcport->port_name);
9cd883f0
QT
1487 qla2x00_post_async_login_work(vha, fcport, NULL);
1488 }
1489}
1490
726b8548
QT
1491int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1492{
f13515ac 1493 u16 data[2];
a4239945 1494 u64 wwn;
cd4ed6b4 1495 u16 sec;
726b8548 1496
0754d5e0
QT
1497 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
1498 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
726b8548
QT
1499 __func__, fcport->port_name, fcport->disc_state,
1500 fcport->fw_login_state, fcport->login_pause, fcport->flags,
1501 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
0754d5e0 1502 fcport->login_gen, fcport->loop_id, fcport->scan_state);
726b8548 1503
a4239945
QT
1504 if (fcport->scan_state != QLA_FCPORT_FOUND)
1505 return 0;
726b8548 1506
07ea4b60
HR
1507 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1508 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1509 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
726b8548
QT
1510 return 0;
1511
5b33469a 1512 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
9cd883f0
QT
1513 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1514 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5b33469a 1515 return 0;
9cd883f0 1516 }
5b33469a
QT
1517 }
1518
726b8548
QT
1519 /* for pure Target Mode. Login will not be initiated */
1520 if (vha->host->active_mode == MODE_TARGET)
1521 return 0;
1522
1523 if (fcport->flags & FCF_ASYNC_SENT) {
1524 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1525 return 0;
1526 }
1527
1528 switch (fcport->disc_state) {
1529 case DSC_DELETED:
a4239945 1530 wwn = wwn_to_u64(fcport->node_name);
8777e431
QT
1531 switch (vha->hw->current_topology) {
1532 case ISP_CFG_N:
1533 if (fcport_is_smaller(fcport)) {
1534 /* this adapter is bigger */
1535 if (fcport->login_retry) {
1536 if (fcport->loop_id == FC_NO_LOOP_ID) {
1537 qla2x00_find_new_loop_id(vha,
1538 fcport);
1539 fcport->fw_login_state =
1540 DSC_LS_PORT_UNAVAIL;
1541 }
1542 fcport->login_retry--;
1543 qla_post_els_plogi_work(vha, fcport);
1544 } else {
1545 ql_log(ql_log_info, vha, 0x705d,
1546 "Unable to reach remote port %8phC",
1547 fcport->port_name);
1548 }
1549 } else {
1550 qla24xx_post_gnl_work(vha, fcport);
1551 }
1552 break;
1553 default:
1554 if (wwn == 0) {
1555 ql_dbg(ql_dbg_disc, vha, 0xffff,
1556 "%s %d %8phC post GNNID\n",
1557 __func__, __LINE__, fcport->port_name);
1558 qla24xx_post_gnnid_work(vha, fcport);
1559 } else if (fcport->loop_id == FC_NO_LOOP_ID) {
1560 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1561 "%s %d %8phC post gnl\n",
1562 __func__, __LINE__, fcport->port_name);
1563 qla24xx_post_gnl_work(vha, fcport);
1564 } else {
1565 qla_chk_n2n_b4_login(vha, fcport);
1566 }
1567 break;
726b8548
QT
1568 }
1569 break;
1570
1571 case DSC_GNL:
8777e431
QT
1572 switch (vha->hw->current_topology) {
1573 case ISP_CFG_N:
1574 if ((fcport->current_login_state & 0xf) == 0x6) {
1575 ql_dbg(ql_dbg_disc, vha, 0x2118,
1576 "%s %d %8phC post GPDB work\n",
1577 __func__, __LINE__, fcport->port_name);
1578 fcport->chip_reset =
1579 vha->hw->base_qpair->chip_reset;
1580 qla24xx_post_gpdb_work(vha, fcport, 0);
1581 } else {
1582 ql_dbg(ql_dbg_disc, vha, 0x2118,
1583 "%s %d %8phC post NVMe PRLI\n",
1584 __func__, __LINE__, fcport->port_name);
1585 qla24xx_post_prli_work(vha, fcport);
1586 }
1587 break;
1588 default:
1589 if (fcport->login_pause) {
1590 fcport->last_rscn_gen = fcport->rscn_gen;
1591 fcport->last_login_gen = fcport->login_gen;
1592 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1593 break;
1594 }
1595 qla_chk_n2n_b4_login(vha, fcport);
726b8548
QT
1596 break;
1597 }
726b8548
QT
1598 break;
1599
1600 case DSC_LOGIN_FAILED:
9cd883f0
QT
1601 if (N2N_TOPO(vha->hw))
1602 qla_chk_n2n_b4_login(vha, fcport);
1603 else
d4f7a16a 1604 qlt_schedule_sess_for_deletion(fcport);
726b8548
QT
1605 break;
1606
1607 case DSC_LOGIN_COMPLETE:
1608 /* recheck login state */
f13515ac
QT
1609 data[0] = data[1] = 0;
1610 qla2x00_post_async_adisc_work(vha, fcport, data);
726b8548
QT
1611 break;
1612
1cbc0efc
DT
1613 case DSC_LOGIN_PEND:
1614 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1615 qla24xx_post_prli_work(vha, fcport);
1616 break;
1617
cd4ed6b4
QT
1618 case DSC_UPD_FCPORT:
1619 sec = jiffies_to_msecs(jiffies -
1620 fcport->jiffies_at_registration)/1000;
1621 if (fcport->sec_since_registration < sec && sec &&
1622 !(sec % 60)) {
1623 fcport->sec_since_registration = sec;
1624 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1625 "%s %8phC - Slow Rport registration(%d Sec)\n",
1626 __func__, fcport->port_name, sec);
1627 }
1628
1629 if (fcport->next_disc_state != DSC_DELETE_PEND)
1630 fcport->next_disc_state = DSC_ADISC;
1631 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1632 break;
1633
726b8548
QT
1634 default:
1635 break;
1636 }
1637
1638 return 0;
1639}
1640
726b8548 1641int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
a4239945 1642 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
726b8548
QT
1643{
1644 struct qla_work_evt *e;
bd432bb5 1645
726b8548
QT
1646 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1647 if (!e)
1648 return QLA_FUNCTION_FAILED;
1649
1650 e->u.new_sess.id = *id;
1651 e->u.new_sess.pla = pla;
a4239945 1652 e->u.new_sess.fc4_type = fc4_type;
726b8548 1653 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
a4239945
QT
1654 if (node_name)
1655 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
726b8548
QT
1656
1657 return qla2x00_post_work(vha, e);
1658}
1659
726b8548
QT
1660static
1661void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1662 struct event_arg *ea)
1663{
1664 fc_port_t *fcport = ea->fcport;
1665
83548fe2
QT
1666 ql_dbg(ql_dbg_disc, vha, 0x2102,
1667 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1668 __func__, fcport->port_name, fcport->disc_state,
1669 fcport->fw_login_state, fcport->login_pause,
1670 fcport->deleted, fcport->conflict,
1671 fcport->last_rscn_gen, fcport->rscn_gen,
1672 fcport->last_login_gen, fcport->login_gen,
1673 fcport->flags);
726b8548
QT
1674
1675 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
726b8548
QT
1676 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
1677 return;
1678
5b33469a 1679 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
9cd883f0
QT
1680 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1681 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5b33469a 1682 return;
9cd883f0 1683 }
5b33469a
QT
1684 }
1685
726b8548 1686 if (fcport->last_rscn_gen != fcport->rscn_gen) {
9e744591 1687 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
726b8548 1688 __func__, __LINE__, fcport->port_name);
9e744591 1689 qla24xx_post_gnl_work(vha, fcport);
726b8548
QT
1690 return;
1691 }
1692
1693 qla24xx_fcport_handle_login(vha, fcport);
1694}
1695
8777e431 1696
8f9a2148
BVA
1697static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1698 struct event_arg *ea)
8777e431
QT
1699{
1700 ql_dbg(ql_dbg_disc, vha, 0x2118,
1701 "%s %d %8phC post PRLI\n",
1702 __func__, __LINE__, ea->fcport->port_name);
1703 qla24xx_post_prli_work(vha, ea->fcport);
1704}
1705
41dc529a 1706void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
726b8548 1707{
bee8b846 1708 fc_port_t *fcport;
726b8548
QT
1709
1710 switch (ea->event) {
1711 case FCME_RELOGIN:
1712 if (test_bit(UNLOADING, &vha->dpc_flags))
1713 return;
5ff1d584 1714
726b8548
QT
1715 qla24xx_handle_relogin_event(vha, ea);
1716 break;
1717 case FCME_RSCN:
1718 if (test_bit(UNLOADING, &vha->dpc_flags))
1719 return;
d4f7a16a
HM
1720 {
1721 unsigned long flags;
bd432bb5 1722
bee8b846
QT
1723 fcport = qla2x00_find_fcport_by_nportid
1724 (vha, &ea->id, 1);
cd4ed6b4 1725 if (fcport) {
cb873ba4 1726 fcport->scan_needed = 1;
cd4ed6b4
QT
1727 fcport->rscn_gen++;
1728 }
bee8b846 1729
f352eeb7
QT
1730 spin_lock_irqsave(&vha->work_lock, flags);
1731 if (vha->scan.scan_flags == 0) {
1732 ql_dbg(ql_dbg_disc, vha, 0xffff,
1733 "%s: schedule\n", __func__);
1734 vha->scan.scan_flags |= SF_QUEUED;
1735 schedule_delayed_work(&vha->scan.scan_work, 5);
41dc529a 1736 }
f352eeb7 1737 spin_unlock_irqrestore(&vha->work_lock, flags);
d4f7a16a 1738 }
726b8548 1739 break;
726b8548
QT
1740 case FCME_GNL_DONE:
1741 qla24xx_handle_gnl_done_event(vha, ea);
1742 break;
1743 case FCME_GPSC_DONE:
a4239945 1744 qla24xx_handle_gpsc_event(vha, ea);
726b8548
QT
1745 break;
1746 case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
1747 qla24xx_handle_plogi_done_event(vha, ea);
1748 break;
a5d42f4c
DG
1749 case FCME_PRLI_DONE:
1750 qla24xx_handle_prli_done_event(vha, ea);
1751 break;
726b8548
QT
1752 case FCME_GPDB_DONE:
1753 qla24xx_handle_gpdb_event(vha, ea);
1754 break;
1755 case FCME_GPNID_DONE:
1756 qla24xx_handle_gpnid_event(vha, ea);
1757 break;
d3bae931
DG
1758 case FCME_GFFID_DONE:
1759 qla24xx_handle_gffid_event(vha, ea);
1760 break;
f13515ac
QT
1761 case FCME_ADISC_DONE:
1762 qla24xx_handle_adisc_event(vha, ea);
1763 break;
a4239945
QT
1764 case FCME_GNNID_DONE:
1765 qla24xx_handle_gnnid_event(vha, ea);
1766 break;
1767 case FCME_GFPNID_DONE:
1768 qla24xx_handle_gfpnid_event(vha, ea);
1769 break;
8777e431
QT
1770 case FCME_ELS_PLOGI_DONE:
1771 qla_handle_els_plogi_done(vha, ea);
1772 break;
726b8548
QT
1773 default:
1774 BUG_ON(1);
1775 break;
1776 }
5ff1d584
AV
1777}
1778
d4f7a16a
HM
1779/*
1780 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1781 * to be consumed by the fcport
1782 */
1783void qla_rscn_replay(fc_port_t *fcport)
1784{
2703eaaf
BVA
1785 struct event_arg ea;
1786
1787 switch (fcport->disc_state) {
1788 case DSC_DELETE_PEND:
1789 return;
1790 default:
1791 break;
1792 }
1793
1794 if (fcport->scan_needed) {
1795 memset(&ea, 0, sizeof(ea));
1796 ea.event = FCME_RSCN;
1797 ea.id = fcport->d_id;
1798 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1799 qla2x00_fcport_event_handler(fcport->vha, &ea);
861d483d 1800 }
d4f7a16a
HM
1801}
1802
3822263e 1803static void
faef62d1 1804qla2x00_tmf_iocb_timeout(void *data)
3822263e 1805{
25ff6af1 1806 srb_t *sp = data;
faef62d1 1807 struct srb_iocb *tmf = &sp->u.iocb_cmd;
3822263e 1808
faef62d1
AB
1809 tmf->u.tmf.comp_status = CS_TIMEOUT;
1810 complete(&tmf->u.tmf.comp);
1811}
9ba56b95 1812
faef62d1 1813static void
25ff6af1 1814qla2x00_tmf_sp_done(void *ptr, int res)
faef62d1 1815{
25ff6af1 1816 srb_t *sp = ptr;
faef62d1 1817 struct srb_iocb *tmf = &sp->u.iocb_cmd;
25ff6af1 1818
faef62d1 1819 complete(&tmf->u.tmf.comp);
3822263e
MI
1820}
1821
1822int
faef62d1 1823qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
3822263e
MI
1824 uint32_t tag)
1825{
1826 struct scsi_qla_host *vha = fcport->vha;
faef62d1 1827 struct srb_iocb *tm_iocb;
3822263e 1828 srb_t *sp;
faef62d1 1829 int rval = QLA_FUNCTION_FAILED;
3822263e 1830
9ba56b95 1831 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3822263e
MI
1832 if (!sp)
1833 goto done;
1834
faef62d1 1835 tm_iocb = &sp->u.iocb_cmd;
9ba56b95
GM
1836 sp->type = SRB_TM_CMD;
1837 sp->name = "tmf";
e74e7d95
BH
1838
1839 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
1840 init_completion(&tm_iocb->u.tmf.comp);
faef62d1 1841 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
e74e7d95 1842
faef62d1
AB
1843 tm_iocb->u.tmf.flags = flags;
1844 tm_iocb->u.tmf.lun = lun;
1845 tm_iocb->u.tmf.data = tag;
1846 sp->done = qla2x00_tmf_sp_done;
3822263e 1847
7c3df132 1848 ql_dbg(ql_dbg_taskm, vha, 0x802f,
cfb0919c
CD
1849 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1850 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1851 fcport->d_id.b.area, fcport->d_id.b.al_pa);
faef62d1 1852
9fe278f4
GM
1853 rval = qla2x00_start_sp(sp);
1854 if (rval != QLA_SUCCESS)
1855 goto done_free_sp;
faef62d1
AB
1856 wait_for_completion(&tm_iocb->u.tmf.comp);
1857
b4146c49 1858 rval = tm_iocb->u.tmf.data;
faef62d1 1859
b4146c49
AG
1860 if (rval != QLA_SUCCESS) {
1861 ql_log(ql_log_warn, vha, 0x8030,
faef62d1
AB
1862 "TM IOCB failed (%x).\n", rval);
1863 }
1864
1865 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
1866 flags = tm_iocb->u.tmf.flags;
1867 lun = (uint16_t)tm_iocb->u.tmf.lun;
1868
1869 /* Issue Marker IOCB */
9eb9c6dc 1870 qla2x00_marker(vha, vha->hw->base_qpair,
92fff53b 1871 fcport->loop_id, lun,
faef62d1
AB
1872 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1873 }
3822263e
MI
1874
1875done_free_sp:
25ff6af1 1876 sp->free(sp);
388a4995 1877 fcport->flags &= ~FCF_ASYNC_SENT;
3822263e
MI
1878done:
1879 return rval;
1880}
1881
4440e46d
AB
1882int
1883qla24xx_async_abort_command(srb_t *sp)
1884{
1885 unsigned long flags = 0;
1886
1887 uint32_t handle;
1888 fc_port_t *fcport = sp->fcport;
585def9b 1889 struct qla_qpair *qpair = sp->qpair;
4440e46d 1890 struct scsi_qla_host *vha = fcport->vha;
585def9b 1891 struct req_que *req = qpair->req;
b027a5ac 1892
585def9b 1893 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4440e46d
AB
1894 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1895 if (req->outstanding_cmds[handle] == sp)
1896 break;
1897 }
585def9b
QT
1898 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1899
4440e46d
AB
1900 if (handle == req->num_outstanding_cmds) {
1901 /* Command not found. */
1902 return QLA_FUNCTION_FAILED;
1903 }
1904 if (sp->type == SRB_FXIOCB_DCMD)
1905 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1906 FXDISC_ABORT_IOCTL);
1907
f6145e86 1908 return qla24xx_async_abort_cmd(sp, true);
4440e46d
AB
1909}
1910
a5d42f4c
DG
1911static void
1912qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1913{
1914 switch (ea->data[0]) {
1915 case MBS_COMMAND_COMPLETE:
1916 ql_dbg(ql_dbg_disc, vha, 0x2118,
1917 "%s %d %8phC post gpdb\n",
1918 __func__, __LINE__, ea->fcport->port_name);
1919
1920 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1921 ea->fcport->logout_on_delete = 1;
03aaa89f
DT
1922 ea->fcport->nvme_prli_service_param = ea->iop[0];
1923 if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
1924 ea->fcport->nvme_first_burst_size =
1925 (ea->iop[1] & 0xffff) * 512;
1926 else
1927 ea->fcport->nvme_first_burst_size = 0;
a5d42f4c
DG
1928 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1929 break;
1930 default:
1cbc0efc
DT
1931 if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
1932 (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */
1933 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1934 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
1935 break;
1936 }
1937
edd05de1
DG
1938 if (ea->fcport->n2n_flag) {
1939 ql_dbg(ql_dbg_disc, vha, 0x2118,
1940 "%s %d %8phC post fc4 prli\n",
1941 __func__, __LINE__, ea->fcport->port_name);
1942 ea->fcport->fc4f_nvme = 0;
1943 ea->fcport->n2n_flag = 0;
1944 qla24xx_post_prli_work(vha, ea->fcport);
1945 }
a5d42f4c
DG
1946 ql_dbg(ql_dbg_disc, vha, 0x2119,
1947 "%s %d %8phC unhandle event of %x\n",
1948 __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
1949 break;
1950 }
1951}
1952
726b8548
QT
1953static void
1954qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ac280b67 1955{
726b8548 1956 port_id_t cid; /* conflict Nport id */
a084fd68
QT
1957 u16 lid;
1958 struct fc_port *conflict_fcport;
82abdcaf 1959 unsigned long flags;
a4239945
QT
1960 struct fc_port *fcport = ea->fcport;
1961
f352eeb7
QT
1962 ql_dbg(ql_dbg_disc, vha, 0xffff,
1963 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
1964 __func__, fcport->port_name, fcport->disc_state,
1965 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
861d483d 1966 ea->sp->gen1, fcport->rscn_gen,
f352eeb7
QT
1967 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
1968
a4239945
QT
1969 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1970 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
1971 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1972 "%s %d %8phC Remote is trying to login\n",
1973 __func__, __LINE__, fcport->port_name);
1974 return;
1975 }
1976
8b5292bc
QT
1977 if ((fcport->disc_state == DSC_DELETE_PEND) ||
1978 (fcport->disc_state == DSC_DELETED)) {
1979 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
a4239945 1980 return;
8b5292bc 1981 }
a4239945
QT
1982
1983 if (ea->sp->gen2 != fcport->login_gen) {
1984 /* target side must have changed it. */
1985 ql_dbg(ql_dbg_disc, vha, 0x20d3,
f352eeb7
QT
1986 "%s %8phC generation changed\n",
1987 __func__, fcport->port_name);
1988 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
a4239945
QT
1989 return;
1990 } else if (ea->sp->gen1 != fcport->rscn_gen) {
861d483d
QT
1991 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1992 "%s %8phC RSCN generation changed\n",
1993 __func__, fcport->port_name);
d4f7a16a 1994 qla_rscn_replay(fcport);
861d483d 1995 qlt_schedule_sess_for_deletion(fcport);
a4239945
QT
1996 return;
1997 }
ac280b67 1998
726b8548 1999 switch (ea->data[0]) {
ac280b67 2000 case MBS_COMMAND_COMPLETE:
a4f92a32
AV
2001 /*
2002 * Driver must validate login state - If PRLI not complete,
2003 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
2004 * requests.
2005 */
a5d42f4c
DG
2006 if (ea->fcport->fc4f_nvme) {
2007 ql_dbg(ql_dbg_disc, vha, 0x2117,
2008 "%s %d %8phC post prli\n",
2009 __func__, __LINE__, ea->fcport->port_name);
2010 qla24xx_post_prli_work(vha, ea->fcport);
2011 } else {
2012 ql_dbg(ql_dbg_disc, vha, 0x20ea,
a084fd68
QT
2013 "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
2014 __func__, __LINE__, ea->fcport->port_name,
2015 ea->fcport->loop_id, ea->fcport->d_id.b24);
2016
2017 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
82abdcaf 2018 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
a5d42f4c
DG
2019 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2020 ea->fcport->logout_on_delete = 1;
3515832c 2021 ea->fcport->send_els_logo = 0;
82abdcaf
QT
2022 ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
2023 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2024
a5d42f4c
DG
2025 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2026 }
ac280b67
AV
2027 break;
2028 case MBS_COMMAND_ERROR:
83548fe2 2029 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
726b8548
QT
2030 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
2031
2032 ea->fcport->flags &= ~FCF_ASYNC_SENT;
2033 ea->fcport->disc_state = DSC_LOGIN_FAILED;
2034 if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
ac280b67
AV
2035 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2036 else
726b8548 2037 qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
ac280b67
AV
2038 break;
2039 case MBS_LOOP_ID_USED:
726b8548
QT
2040 /* data[1] = IO PARAM 1 = nport ID */
2041 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
2042 cid.b.area = (ea->iop[1] >> 8) & 0xff;
2043 cid.b.al_pa = ea->iop[1] & 0xff;
2044 cid.b.rsvd_1 = 0;
2045
83548fe2 2046 ql_dbg(ql_dbg_disc, vha, 0x20ec,
5c640053 2047 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
83548fe2 2048 __func__, __LINE__, ea->fcport->port_name,
5c640053 2049 ea->fcport->loop_id, cid.b24);
726b8548 2050
5c640053
QT
2051 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2052 ea->fcport->loop_id = FC_NO_LOOP_ID;
726b8548
QT
2053 qla24xx_post_gnl_work(vha, ea->fcport);
2054 break;
2055 case MBS_PORT_ID_USED:
a084fd68
QT
2056 lid = ea->iop[1] & 0xffff;
2057 qlt_find_sess_invalidate_other(vha,
2058 wwn_to_u64(ea->fcport->port_name),
2059 ea->fcport->d_id, lid, &conflict_fcport);
2060
2061 if (conflict_fcport) {
2062 /*
2063 * Another fcport share the same loop_id/nport id.
2064 * Conflict fcport needs to finish cleanup before this
2065 * fcport can proceed to login.
2066 */
2067 conflict_fcport->conflict = ea->fcport;
2068 ea->fcport->login_pause = 1;
2069
2070 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2071 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
2072 __func__, __LINE__, ea->fcport->port_name,
2073 ea->fcport->d_id.b24, lid);
a084fd68
QT
2074 } else {
2075 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2076 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
2077 __func__, __LINE__, ea->fcport->port_name,
2078 ea->fcport->d_id.b24, lid);
2079
2080 qla2x00_clear_loop_id(ea->fcport);
2081 set_bit(lid, vha->hw->loop_id_map);
2082 ea->fcport->loop_id = lid;
2083 ea->fcport->keep_nport_handle = 0;
94cff6e1 2084 qlt_schedule_sess_for_deletion(ea->fcport);
a084fd68 2085 }
ac280b67
AV
2086 break;
2087 }
4916392b 2088 return;
ac280b67
AV
2089}
2090
4916392b 2091void
ac280b67
AV
2092qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
2093 uint16_t *data)
2094{
a6ca8878 2095 qlt_logo_completion_handler(fcport, data[0]);
726b8548 2096 fcport->login_gen++;
fa83e658 2097 fcport->flags &= ~FCF_ASYNC_ACTIVE;
4916392b 2098 return;
ac280b67
AV
2099}
2100
1da177e4
LT
2101/****************************************************************************/
2102/* QLogic ISP2x00 Hardware Support Functions. */
2103/****************************************************************************/
2104
fa492630 2105static int
7d613ac6
SV
2106qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2107{
2108 int rval = QLA_SUCCESS;
2109 struct qla_hw_data *ha = vha->hw;
2110 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 2111 uint16_t config[4];
7d613ac6
SV
2112
2113 qla83xx_idc_lock(vha, 0);
2114
2115 /* SV: TODO: Assign initialization timeout from
2116 * flash-info / other param
2117 */
2118 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2119 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2120
2121 /* Set our fcoe function presence */
2122 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2123 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2124 "Error while setting DRV-Presence.\n");
2125 rval = QLA_FUNCTION_FAILED;
2126 goto exit;
2127 }
2128
2129 /* Decide the reset ownership */
2130 qla83xx_reset_ownership(vha);
2131
2132 /*
2133 * On first protocol driver load:
2134 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2135 * register.
2136 * Others: Check compatibility with current IDC Major version.
2137 */
2138 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2139 if (ha->flags.nic_core_reset_owner) {
2140 /* Set IDC Major version */
2141 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2142 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2143
2144 /* Clearing IDC-Lock-Recovery register */
2145 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2146 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2147 /*
2148 * Clear further IDC participation if we are not compatible with
2149 * the current IDC Major Version.
2150 */
2151 ql_log(ql_log_warn, vha, 0xb07d,
2152 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2153 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2154 __qla83xx_clear_drv_presence(vha);
2155 rval = QLA_FUNCTION_FAILED;
2156 goto exit;
2157 }
2158 /* Each function sets its supported Minor version. */
2159 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2160 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2161 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2162
711aa7f7
SK
2163 if (ha->flags.nic_core_reset_owner) {
2164 memset(config, 0, sizeof(config));
2165 if (!qla81xx_get_port_config(vha, config))
2166 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2167 QLA8XXX_DEV_READY);
2168 }
2169
7d613ac6
SV
2170 rval = qla83xx_idc_state_handler(vha);
2171
2172exit:
2173 qla83xx_idc_unlock(vha, 0);
2174
2175 return rval;
2176}
2177
1da177e4
LT
2178/*
2179* qla2x00_initialize_adapter
2180* Initialize board.
2181*
2182* Input:
2183* ha = adapter block pointer.
2184*
2185* Returns:
2186* 0 = success
2187*/
2188int
e315cd28 2189qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
2190{
2191 int rval;
e315cd28 2192 struct qla_hw_data *ha = vha->hw;
73208dfd 2193 struct req_que *req = ha->req_q_map[0];
3f006ac3 2194 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2533cf67 2195
fc90adaf
JC
2196 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2197 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2198
1da177e4 2199 /* Clear adapter flags. */
e315cd28 2200 vha->flags.online = 0;
2533cf67 2201 ha->flags.chip_reset_done = 0;
e315cd28 2202 vha->flags.reset_active = 0;
85880801
AV
2203 ha->flags.pci_channel_io_perm_failure = 0;
2204 ha->flags.eeh_busy = 0;
fabbb8df 2205 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
e315cd28
AC
2206 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2207 atomic_set(&vha->loop_state, LOOP_DOWN);
2208 vha->device_flags = DFLG_NO_CABLE;
2209 vha->dpc_flags = 0;
2210 vha->flags.management_server_logged_in = 0;
2211 vha->marker_needed = 0;
1da177e4
LT
2212 ha->isp_abort_cnt = 0;
2213 ha->beacon_blink_led = 0;
2214
73208dfd
AC
2215 set_bit(0, ha->req_qid_map);
2216 set_bit(0, ha->rsp_qid_map);
2217
cfb0919c 2218 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 2219 "Configuring PCI space...\n");
e315cd28 2220 rval = ha->isp_ops->pci_config(vha);
1da177e4 2221 if (rval) {
7c3df132
SK
2222 ql_log(ql_log_warn, vha, 0x0044,
2223 "Unable to configure PCI space.\n");
1da177e4
LT
2224 return (rval);
2225 }
2226
e315cd28 2227 ha->isp_ops->reset_chip(vha);
1da177e4 2228
3f006ac3
MH
2229 /* Check for secure flash support */
2230 if (IS_QLA28XX(ha)) {
2231 if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
2232 ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
2233 ha->flags.secure_adapter = 1;
2234 }
2235 }
2236
2237
e315cd28 2238 rval = qla2xxx_get_flash_info(vha);
c00d8994 2239 if (rval) {
7c3df132
SK
2240 ql_log(ql_log_fatal, vha, 0x004f,
2241 "Unable to validate FLASH data.\n");
7ec0effd
AD
2242 return rval;
2243 }
2244
2245 if (IS_QLA8044(ha)) {
2246 qla8044_read_reset_template(vha);
2247
2248 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2249 * If DONRESET_BIT0 is set, drivers should not set dev_state
2250 * to NEED_RESET. But if NEED_RESET is set, drivers should
2251 * should honor the reset. */
2252 if (ql2xdontresethba == 1)
2253 qla8044_set_idc_dontreset(vha);
c00d8994
AV
2254 }
2255
73208dfd 2256 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 2257 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 2258 "Configure NVRAM parameters...\n");
0107109e 2259
e315cd28 2260 ha->isp_ops->nvram_config(vha);
1da177e4 2261
d4c760c2
AV
2262 if (ha->flags.disable_serdes) {
2263 /* Mask HBA via NVRAM settings? */
7c3df132 2264 ql_log(ql_log_info, vha, 0x0077,
7b833558 2265 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
d4c760c2
AV
2266 return QLA_FUNCTION_FAILED;
2267 }
2268
cfb0919c 2269 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 2270 "Verifying loaded RISC code...\n");
1da177e4 2271
e315cd28
AC
2272 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2273 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
2274 if (rval)
2275 return (rval);
e315cd28 2276 rval = qla2x00_setup_chip(vha);
d19044c3
AV
2277 if (rval)
2278 return (rval);
1da177e4 2279 }
a9083016 2280
4d4df193 2281 if (IS_QLA84XX(ha)) {
e315cd28 2282 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 2283 if (!ha->cs84xx) {
7c3df132 2284 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
2285 "Unable to configure ISP84XX.\n");
2286 return QLA_FUNCTION_FAILED;
2287 }
2288 }
2d70c103 2289
ead03855 2290 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2d70c103
NB
2291 rval = qla2x00_init_rings(vha);
2292
26a77799
AV
2293 /* No point in continuing if firmware initialization failed. */
2294 if (rval != QLA_SUCCESS)
2295 return rval;
2296
2533cf67 2297 ha->flags.chip_reset_done = 1;
1da177e4 2298
9a069e19 2299 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 2300 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
2301 rval = qla84xx_init_chip(vha);
2302 if (rval != QLA_SUCCESS) {
7c3df132
SK
2303 ql_log(ql_log_warn, vha, 0x00d4,
2304 "Unable to initialize ISP84XX.\n");
8d2b21db 2305 qla84xx_put_chip(vha);
9a069e19
GM
2306 }
2307 }
2308
7d613ac6
SV
2309 /* Load the NIC Core f/w if we are the first protocol driver. */
2310 if (IS_QLA8031(ha)) {
2311 rval = qla83xx_nic_core_fw_load(vha);
2312 if (rval)
2313 ql_log(ql_log_warn, vha, 0x0124,
2314 "Error in initializing NIC Core f/w.\n");
2315 }
2316
2f0f3f4f
MI
2317 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2318 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 2319
c46e65c7
JC
2320 if (IS_P3P_TYPE(ha))
2321 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2322 else
2323 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2324
1da177e4
LT
2325 return (rval);
2326}
2327
2328/**
abbd8870 2329 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2db6228d 2330 * @vha: HA context
1da177e4
LT
2331 *
2332 * Returns 0 on success.
2333 */
abbd8870 2334int
e315cd28 2335qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 2336{
a157b101 2337 uint16_t w;
abbd8870 2338 unsigned long flags;
e315cd28 2339 struct qla_hw_data *ha = vha->hw;
3d71644c 2340 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2341
1da177e4 2342 pci_set_master(ha->pdev);
af6177d8 2343 pci_try_set_mwi(ha->pdev);
1da177e4 2344
1da177e4 2345 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2346 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
2347 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2348
737faece 2349 pci_disable_rom(ha->pdev);
1da177e4
LT
2350
2351 /* Get PCI bus information. */
2352 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 2353 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
2354 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2355
abbd8870
AV
2356 return QLA_SUCCESS;
2357}
1da177e4 2358
abbd8870
AV
2359/**
2360 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2db6228d 2361 * @vha: HA context
abbd8870
AV
2362 *
2363 * Returns 0 on success.
2364 */
2365int
e315cd28 2366qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 2367{
a157b101 2368 uint16_t w;
abbd8870
AV
2369 unsigned long flags = 0;
2370 uint32_t cnt;
e315cd28 2371 struct qla_hw_data *ha = vha->hw;
3d71644c 2372 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2373
abbd8870 2374 pci_set_master(ha->pdev);
af6177d8 2375 pci_try_set_mwi(ha->pdev);
1da177e4 2376
abbd8870 2377 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2378 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 2379
abbd8870
AV
2380 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2381 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 2382 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 2383
abbd8870
AV
2384 /*
2385 * If this is a 2300 card and not 2312, reset the
2386 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2387 * the 2310 also reports itself as a 2300 so we need to get the
2388 * fb revision level -- a 6 indicates it really is a 2300 and
2389 * not a 2310.
2390 */
2391 if (IS_QLA2300(ha)) {
2392 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 2393
abbd8870 2394 /* Pause RISC. */
3d71644c 2395 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 2396 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 2397 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 2398 break;
1da177e4 2399
abbd8870
AV
2400 udelay(10);
2401 }
1da177e4 2402
abbd8870 2403 /* Select FPM registers. */
3d71644c
AV
2404 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2405 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2406
2407 /* Get the fb rev level */
3d71644c 2408 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
2409
2410 if (ha->fb_rev == FPM_2300)
a157b101 2411 pci_clear_mwi(ha->pdev);
abbd8870
AV
2412
2413 /* Deselect FPM registers. */
3d71644c
AV
2414 WRT_REG_WORD(&reg->ctrl_status, 0x0);
2415 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2416
2417 /* Release RISC module. */
3d71644c 2418 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 2419 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 2420 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
2421 break;
2422
2423 udelay(10);
1da177e4 2424 }
1da177e4 2425
abbd8870
AV
2426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2427 }
1da177e4 2428
abbd8870
AV
2429 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2430
737faece 2431 pci_disable_rom(ha->pdev);
1da177e4 2432
abbd8870
AV
2433 /* Get PCI bus information. */
2434 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 2435 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2436 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2437
2438 return QLA_SUCCESS;
1da177e4
LT
2439}
2440
0107109e
AV
2441/**
2442 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2db6228d 2443 * @vha: HA context
0107109e
AV
2444 *
2445 * Returns 0 on success.
2446 */
2447int
e315cd28 2448qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 2449{
a157b101 2450 uint16_t w;
0107109e 2451 unsigned long flags = 0;
e315cd28 2452 struct qla_hw_data *ha = vha->hw;
0107109e 2453 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
2454
2455 pci_set_master(ha->pdev);
af6177d8 2456 pci_try_set_mwi(ha->pdev);
0107109e
AV
2457
2458 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2459 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
2460 w &= ~PCI_COMMAND_INTX_DISABLE;
2461 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2462
2463 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2464
2465 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
2466 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2467 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
2468
2469 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 2470 if (pci_is_pcie(ha->pdev))
5ffd3a52 2471 pcie_set_readrq(ha->pdev, 4096);
0107109e 2472
737faece 2473 pci_disable_rom(ha->pdev);
0107109e 2474
44c10138 2475 ha->chip_revision = ha->pdev->revision;
a8488abe 2476
0107109e
AV
2477 /* Get PCI bus information. */
2478 spin_lock_irqsave(&ha->hardware_lock, flags);
2479 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
2480 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2481
2482 return QLA_SUCCESS;
2483}
2484
c3a2f0df
AV
2485/**
2486 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2db6228d 2487 * @vha: HA context
c3a2f0df
AV
2488 *
2489 * Returns 0 on success.
2490 */
2491int
e315cd28 2492qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
2493{
2494 uint16_t w;
e315cd28 2495 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
2496
2497 pci_set_master(ha->pdev);
2498 pci_try_set_mwi(ha->pdev);
2499
2500 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2501 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2502 w &= ~PCI_COMMAND_INTX_DISABLE;
2503 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2504
2505 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 2506 if (pci_is_pcie(ha->pdev))
5ffd3a52 2507 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 2508
737faece 2509 pci_disable_rom(ha->pdev);
c3a2f0df
AV
2510
2511 ha->chip_revision = ha->pdev->revision;
2512
2513 return QLA_SUCCESS;
2514}
2515
1da177e4
LT
2516/**
2517 * qla2x00_isp_firmware() - Choose firmware image.
2db6228d 2518 * @vha: HA context
1da177e4
LT
2519 *
2520 * Returns 0 on success.
2521 */
2522static int
e315cd28 2523qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
2524{
2525 int rval;
42e421b1
AV
2526 uint16_t loop_id, topo, sw_cap;
2527 uint8_t domain, area, al_pa;
e315cd28 2528 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2529
2530 /* Assume loading risc code */
fa2a1ce5 2531 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
2532
2533 if (ha->flags.disable_risc_code_load) {
7c3df132 2534 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
2535
2536 /* Verify checksum of loaded RISC code. */
e315cd28 2537 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
2538 if (rval == QLA_SUCCESS) {
2539 /* And, verify we are not in ROM code. */
e315cd28 2540 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
2541 &area, &domain, &topo, &sw_cap);
2542 }
1da177e4
LT
2543 }
2544
7c3df132
SK
2545 if (rval)
2546 ql_dbg(ql_dbg_init, vha, 0x007a,
2547 "**** Load RISC code ****.\n");
1da177e4
LT
2548
2549 return (rval);
2550}
2551
2552/**
2553 * qla2x00_reset_chip() - Reset ISP chip.
2db6228d 2554 * @vha: HA context
1da177e4
LT
2555 *
2556 * Returns 0 on success.
2557 */
3f006ac3 2558int
e315cd28 2559qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
2560{
2561 unsigned long flags = 0;
e315cd28 2562 struct qla_hw_data *ha = vha->hw;
3d71644c 2563 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2564 uint32_t cnt;
1da177e4 2565 uint16_t cmd;
3f006ac3 2566 int rval = QLA_FUNCTION_FAILED;
1da177e4 2567
85880801 2568 if (unlikely(pci_channel_offline(ha->pdev)))
3f006ac3 2569 return rval;
85880801 2570
fd34f556 2571 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
2572
2573 spin_lock_irqsave(&ha->hardware_lock, flags);
2574
2575 /* Turn off master enable */
2576 cmd = 0;
2577 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2578 cmd &= ~PCI_COMMAND_MASTER;
2579 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2580
2581 if (!IS_QLA2100(ha)) {
2582 /* Pause RISC. */
2583 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
2584 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2585 for (cnt = 0; cnt < 30000; cnt++) {
2586 if ((RD_REG_WORD(&reg->hccr) &
2587 HCCR_RISC_PAUSE) != 0)
2588 break;
2589 udelay(100);
2590 }
2591 } else {
2592 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2593 udelay(10);
2594 }
2595
2596 /* Select FPM registers. */
2597 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2598 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2599
2600 /* FPM Soft Reset. */
2601 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
2602 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2603
2604 /* Toggle Fpm Reset. */
2605 if (!IS_QLA2200(ha)) {
2606 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
2607 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2608 }
2609
2610 /* Select frame buffer registers. */
2611 WRT_REG_WORD(&reg->ctrl_status, 0x10);
2612 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2613
2614 /* Reset frame buffer FIFOs. */
2615 if (IS_QLA2200(ha)) {
2616 WRT_FB_CMD_REG(ha, reg, 0xa000);
2617 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2618 } else {
2619 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2620
2621 /* Read back fb_cmd until zero or 3 seconds max */
2622 for (cnt = 0; cnt < 3000; cnt++) {
2623 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2624 break;
2625 udelay(100);
2626 }
2627 }
2628
2629 /* Select RISC module registers. */
2630 WRT_REG_WORD(&reg->ctrl_status, 0);
2631 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2632
2633 /* Reset RISC processor. */
2634 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2635 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2636
2637 /* Release RISC processor. */
2638 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2639 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2640 }
2641
2642 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
2643 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
2644
2645 /* Reset ISP chip. */
2646 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2647
2648 /* Wait for RISC to recover from reset. */
2649 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2650 /*
2651 * It is necessary to for a delay here since the card doesn't
2652 * respond to PCI reads during a reset. On some architectures
2653 * this will result in an MCA.
2654 */
2655 udelay(20);
2656 for (cnt = 30000; cnt; cnt--) {
2657 if ((RD_REG_WORD(&reg->ctrl_status) &
2658 CSR_ISP_SOFT_RESET) == 0)
2659 break;
2660 udelay(100);
2661 }
2662 } else
2663 udelay(10);
2664
2665 /* Reset RISC processor. */
2666 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2667
2668 WRT_REG_WORD(&reg->semaphore, 0);
2669
2670 /* Release RISC processor. */
2671 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2672 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2673
2674 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2675 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 2676 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 2677 break;
1da177e4
LT
2678
2679 udelay(100);
2680 }
2681 } else
2682 udelay(100);
2683
2684 /* Turn on master enable */
2685 cmd |= PCI_COMMAND_MASTER;
2686 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2687
2688 /* Disable RISC pause on FPM parity error. */
2689 if (!IS_QLA2100(ha)) {
2690 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
2691 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2692 }
2693
2694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3f006ac3
MH
2695
2696 return QLA_SUCCESS;
1da177e4
LT
2697}
2698
b1d46989
MI
2699/**
2700 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2db6228d 2701 * @vha: HA context
b1d46989
MI
2702 *
2703 * Returns 0 on success.
2704 */
fa492630 2705static int
b1d46989
MI
2706qla81xx_reset_mpi(scsi_qla_host_t *vha)
2707{
2708 uint16_t mb[4] = {0x1010, 0, 1, 0};
2709
6246b8a1
GM
2710 if (!IS_QLA81XX(vha->hw))
2711 return QLA_SUCCESS;
2712
b1d46989
MI
2713 return qla81xx_write_mpi_register(vha, mb);
2714}
2715
0107109e 2716/**
88c26663 2717 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2db6228d 2718 * @vha: HA context
0107109e
AV
2719 *
2720 * Returns 0 on success.
2721 */
d14e72fb 2722static inline int
e315cd28 2723qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
2724{
2725 unsigned long flags = 0;
e315cd28 2726 struct qla_hw_data *ha = vha->hw;
0107109e 2727 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
52c82823 2728 uint32_t cnt;
335a1cc9 2729 uint16_t wd;
b1d46989 2730 static int abts_cnt; /* ISP abort retry counts */
d14e72fb 2731 int rval = QLA_SUCCESS;
0107109e 2732
0107109e
AV
2733 spin_lock_irqsave(&ha->hardware_lock, flags);
2734
2735 /* Reset RISC. */
2736 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2737 for (cnt = 0; cnt < 30000; cnt++) {
2738 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2739 break;
2740
2741 udelay(10);
2742 }
2743
d14e72fb
HM
2744 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
2745 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2746
2747 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2748 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2749 RD_REG_DWORD(&reg->hccr),
2750 RD_REG_DWORD(&reg->ctrl_status),
2751 (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
2752
0107109e
AV
2753 WRT_REG_DWORD(&reg->ctrl_status,
2754 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 2755 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 2756
335a1cc9 2757 udelay(100);
d14e72fb 2758
88c26663 2759 /* Wait for firmware to complete NVRAM accesses. */
52c82823 2760 RD_REG_WORD(&reg->mailbox0);
d14e72fb
HM
2761 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2762 rval == QLA_SUCCESS; cnt--) {
88c26663 2763 barrier();
d14e72fb
HM
2764 if (cnt)
2765 udelay(5);
2766 else
2767 rval = QLA_FUNCTION_TIMEOUT;
88c26663
AV
2768 }
2769
d14e72fb
HM
2770 if (rval == QLA_SUCCESS)
2771 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2772
2773 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2774 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2775 RD_REG_DWORD(&reg->hccr),
2776 RD_REG_DWORD(&reg->mailbox0));
2777
335a1cc9 2778 /* Wait for soft-reset to complete. */
52c82823 2779 RD_REG_DWORD(&reg->ctrl_status);
200ffb15 2780 for (cnt = 0; cnt < 60; cnt++) {
0107109e 2781 barrier();
d14e72fb
HM
2782 if ((RD_REG_DWORD(&reg->ctrl_status) &
2783 CSRX_ISP_SOFT_RESET) == 0)
2784 break;
2785
2786 udelay(5);
0107109e 2787 }
d14e72fb
HM
2788 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
2789 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2790
2791 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2792 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2793 RD_REG_DWORD(&reg->hccr),
2794 RD_REG_DWORD(&reg->ctrl_status));
0107109e 2795
b1d46989
MI
2796 /* If required, do an MPI FW reset now */
2797 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2798 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2799 if (++abts_cnt < 5) {
2800 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2801 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2802 } else {
2803 /*
2804 * We exhausted the ISP abort retries. We have to
2805 * set the board offline.
2806 */
2807 abts_cnt = 0;
2808 vha->flags.online = 0;
2809 }
2810 }
2811 }
2812
0107109e
AV
2813 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2814 RD_REG_DWORD(&reg->hccr);
2815
2816 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2817 RD_REG_DWORD(&reg->hccr);
2818
2819 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2820 RD_REG_DWORD(&reg->hccr);
2821
52c82823 2822 RD_REG_WORD(&reg->mailbox0);
200ffb15 2823 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
d14e72fb 2824 rval == QLA_SUCCESS; cnt--) {
0107109e 2825 barrier();
d14e72fb
HM
2826 if (cnt)
2827 udelay(5);
2828 else
2829 rval = QLA_FUNCTION_TIMEOUT;
0107109e 2830 }
d14e72fb
HM
2831 if (rval == QLA_SUCCESS)
2832 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2833
2834 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2835 "Host Risc 0x%x, mailbox0 0x%x\n",
2836 RD_REG_DWORD(&reg->hccr),
2837 RD_REG_WORD(&reg->mailbox0));
0107109e
AV
2838
2839 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6 2840
d14e72fb
HM
2841 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2842 "Driver in %s mode\n",
2843 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2844
124f85e6
AV
2845 if (IS_NOPOLLING_TYPE(ha))
2846 ha->isp_ops->enable_intrs(ha);
d14e72fb
HM
2847
2848 return rval;
0107109e
AV
2849}
2850
4ea2c9c7
JC
2851static void
2852qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2853{
2854 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2855
2856 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2857 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2858
2859}
2860
2861static void
2862qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2863{
2864 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2865
2866 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2867 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2868}
2869
2870static void
2871qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2872{
4ea2c9c7
JC
2873 uint32_t wd32 = 0;
2874 uint delta_msec = 100;
2875 uint elapsed_msec = 0;
2876 uint timeout_msec;
2877 ulong n;
2878
cc790764
JC
2879 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2880 vha->hw->pdev->subsystem_device != 0x0240)
4ea2c9c7
JC
2881 return;
2882
8dd7e3a5
JC
2883 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2884 udelay(100);
2885
4ea2c9c7
JC
2886attempt:
2887 timeout_msec = TIMEOUT_SEMAPHORE;
2888 n = timeout_msec / delta_msec;
2889 while (n--) {
2890 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2891 qla25xx_read_risc_sema_reg(vha, &wd32);
2892 if (wd32 & RISC_SEMAPHORE)
2893 break;
2894 msleep(delta_msec);
2895 elapsed_msec += delta_msec;
2896 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2897 goto force;
2898 }
2899
2900 if (!(wd32 & RISC_SEMAPHORE))
2901 goto force;
2902
2903 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2904 goto acquired;
2905
2906 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2907 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2908 n = timeout_msec / delta_msec;
2909 while (n--) {
2910 qla25xx_read_risc_sema_reg(vha, &wd32);
2911 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2912 break;
2913 msleep(delta_msec);
2914 elapsed_msec += delta_msec;
2915 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2916 goto force;
2917 }
2918
2919 if (wd32 & RISC_SEMAPHORE_FORCE)
2920 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2921
2922 goto attempt;
2923
2924force:
2925 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2926
2927acquired:
2928 return;
2929}
2930
88c26663
AV
2931/**
2932 * qla24xx_reset_chip() - Reset ISP24xx chip.
2db6228d 2933 * @vha: HA context
88c26663
AV
2934 *
2935 * Returns 0 on success.
2936 */
3f006ac3 2937int
e315cd28 2938qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 2939{
e315cd28 2940 struct qla_hw_data *ha = vha->hw;
3f006ac3 2941 int rval = QLA_FUNCTION_FAILED;
85880801
AV
2942
2943 if (pci_channel_offline(ha->pdev) &&
2944 ha->flags.pci_channel_io_perm_failure) {
3f006ac3 2945 return rval;
85880801
AV
2946 }
2947
fd34f556 2948 ha->isp_ops->disable_intrs(ha);
88c26663 2949
4ea2c9c7
JC
2950 qla25xx_manipulate_risc_semaphore(vha);
2951
88c26663 2952 /* Perform RISC reset. */
3f006ac3
MH
2953 rval = qla24xx_reset_risc(vha);
2954
2955 return rval;
88c26663
AV
2956}
2957
1da177e4
LT
2958/**
2959 * qla2x00_chip_diag() - Test chip for proper operation.
2db6228d 2960 * @vha: HA context
1da177e4
LT
2961 *
2962 * Returns 0 on success.
2963 */
abbd8870 2964int
e315cd28 2965qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
2966{
2967 int rval;
e315cd28 2968 struct qla_hw_data *ha = vha->hw;
3d71644c 2969 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
2970 unsigned long flags = 0;
2971 uint16_t data;
2972 uint32_t cnt;
2973 uint16_t mb[5];
73208dfd 2974 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
2975
2976 /* Assume a failed state */
2977 rval = QLA_FUNCTION_FAILED;
2978
da4704d9
BVA
2979 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
2980 &reg->flash_address);
1da177e4
LT
2981
2982 spin_lock_irqsave(&ha->hardware_lock, flags);
2983
2984 /* Reset ISP chip. */
2985 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2986
2987 /*
2988 * We need to have a delay here since the card will not respond while
2989 * in reset causing an MCA on some architectures.
2990 */
2991 udelay(20);
2992 data = qla2x00_debounce_register(&reg->ctrl_status);
2993 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2994 udelay(5);
2995 data = RD_REG_WORD(&reg->ctrl_status);
2996 barrier();
2997 }
2998
2999 if (!cnt)
3000 goto chip_diag_failed;
3001
7c3df132
SK
3002 ql_dbg(ql_dbg_init, vha, 0x007c,
3003 "Reset register cleared by chip reset.\n");
1da177e4
LT
3004
3005 /* Reset RISC processor. */
3006 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
3007 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
3008
3009 /* Workaround for QLA2312 PCI parity error */
3010 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3011 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
3012 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
3013 udelay(5);
3014 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 3015 barrier();
1da177e4
LT
3016 }
3017 } else
3018 udelay(10);
3019
3020 if (!cnt)
3021 goto chip_diag_failed;
3022
3023 /* Check product ID of chip */
5a68a1c2 3024 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
1da177e4
LT
3025
3026 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3027 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3028 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3029 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3030 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
3031 mb[3] != PROD_ID_3) {
7c3df132
SK
3032 ql_log(ql_log_warn, vha, 0x0062,
3033 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
3034 mb[1], mb[2], mb[3]);
1da177e4
LT
3035
3036 goto chip_diag_failed;
3037 }
3038 ha->product_id[0] = mb[1];
3039 ha->product_id[1] = mb[2];
3040 ha->product_id[2] = mb[3];
3041 ha->product_id[3] = mb[4];
3042
3043 /* Adjust fw RISC transfer size */
73208dfd 3044 if (req->length > 1024)
1da177e4
LT
3045 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3046 else
3047 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 3048 req->length;
1da177e4
LT
3049
3050 if (IS_QLA2200(ha) &&
3051 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3052 /* Limit firmware transfer size with a 2200A */
7c3df132 3053 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 3054
ea5b6382 3055 ha->device_type |= DT_ISP2200A;
1da177e4
LT
3056 ha->fw_transfer_size = 128;
3057 }
3058
3059 /* Wrap Incoming Mailboxes Test. */
3060 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3061
7c3df132 3062 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 3063 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
3064 if (rval)
3065 ql_log(ql_log_warn, vha, 0x0080,
3066 "Failed mailbox send register test.\n");
3067 else
1da177e4
LT
3068 /* Flag a successful rval */
3069 rval = QLA_SUCCESS;
1da177e4
LT
3070 spin_lock_irqsave(&ha->hardware_lock, flags);
3071
3072chip_diag_failed:
3073 if (rval)
7c3df132
SK
3074 ql_log(ql_log_info, vha, 0x0081,
3075 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
3076
3077 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3078
3079 return (rval);
3080}
3081
0107109e
AV
3082/**
3083 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2db6228d 3084 * @vha: HA context
0107109e
AV
3085 *
3086 * Returns 0 on success.
3087 */
3088int
e315cd28 3089qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
3090{
3091 int rval;
e315cd28 3092 struct qla_hw_data *ha = vha->hw;
73208dfd 3093 struct req_que *req = ha->req_q_map[0];
0107109e 3094
7ec0effd 3095 if (IS_P3P_TYPE(ha))
a9083016
GM
3096 return QLA_SUCCESS;
3097
73208dfd 3098 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 3099
e315cd28 3100 rval = qla2x00_mbx_reg_test(vha);
0107109e 3101 if (rval) {
7c3df132
SK
3102 ql_log(ql_log_warn, vha, 0x0082,
3103 "Failed mailbox send register test.\n");
0107109e
AV
3104 } else {
3105 /* Flag a successful rval */
3106 rval = QLA_SUCCESS;
3107 }
3108
3109 return rval;
3110}
3111
ad0a0b01
QT
3112static void
3113qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
0107109e 3114{
a7a167bf 3115 int rval;
df613b96
AV
3116 dma_addr_t tc_dma;
3117 void *tc;
e315cd28 3118 struct qla_hw_data *ha = vha->hw;
a7a167bf 3119
ad0a0b01 3120 if (ha->eft) {
7c3df132 3121 ql_dbg(ql_dbg_init, vha, 0x00bd,
ad0a0b01
QT
3122 "%s: Offload Mem is already allocated.\n",
3123 __func__);
a7a167bf
AV
3124 return;
3125 }
d4e3e04d 3126
ad0a0b01 3127 if (IS_FWI2_CAPABLE(ha)) {
df613b96 3128 /* Allocate memory for Fibre Channel Event Buffer. */
f73cb695 3129 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
ecc89f25 3130 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
436a7b11 3131 goto try_eft;
df613b96 3132
f73cb695
CD
3133 if (ha->fce)
3134 dma_free_coherent(&ha->pdev->dev,
3135 FCE_SIZE, ha->fce, ha->fce_dma);
3136
3137 /* Allocate memory for Fibre Channel Event Buffer. */
750afb08
LC
3138 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3139 GFP_KERNEL);
df613b96 3140 if (!tc) {
7c3df132
SK
3141 ql_log(ql_log_warn, vha, 0x00be,
3142 "Unable to allocate (%d KB) for FCE.\n",
3143 FCE_SIZE / 1024);
17d98630 3144 goto try_eft;
df613b96
AV
3145 }
3146
e315cd28 3147 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
3148 ha->fce_mb, &ha->fce_bufs);
3149 if (rval) {
7c3df132
SK
3150 ql_log(ql_log_warn, vha, 0x00bf,
3151 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
3152 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
3153 tc_dma);
3154 ha->flags.fce_enabled = 0;
17d98630 3155 goto try_eft;
df613b96 3156 }
cfb0919c 3157 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 3158 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 3159
df613b96
AV
3160 ha->flags.fce_enabled = 1;
3161 ha->fce_dma = tc_dma;
3162 ha->fce = tc;
f73cb695 3163
436a7b11 3164try_eft:
f73cb695
CD
3165 if (ha->eft)
3166 dma_free_coherent(&ha->pdev->dev,
3167 EFT_SIZE, ha->eft, ha->eft_dma);
3168
436a7b11 3169 /* Allocate memory for Extended Trace Buffer. */
750afb08
LC
3170 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3171 GFP_KERNEL);
436a7b11 3172 if (!tc) {
7c3df132
SK
3173 ql_log(ql_log_warn, vha, 0x00c1,
3174 "Unable to allocate (%d KB) for EFT.\n",
3175 EFT_SIZE / 1024);
ad0a0b01 3176 goto eft_err;
436a7b11
AV
3177 }
3178
e315cd28 3179 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 3180 if (rval) {
7c3df132
SK
3181 ql_log(ql_log_warn, vha, 0x00c2,
3182 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
3183 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
3184 tc_dma);
ad0a0b01 3185 goto eft_err;
436a7b11 3186 }
cfb0919c 3187 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 3188 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11 3189
436a7b11
AV
3190 ha->eft_dma = tc_dma;
3191 ha->eft = tc;
d4e3e04d 3192 }
f73cb695 3193
ad0a0b01
QT
3194eft_err:
3195 return;
3196}
3197
3198void
3199qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3200{
a28d9e4e 3201 int rval;
ad0a0b01
QT
3202 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3203 eft_size, fce_size, mq_size;
3204 struct qla_hw_data *ha = vha->hw;
3205 struct req_que *req = ha->req_q_map[0];
3206 struct rsp_que *rsp = ha->rsp_q_map[0];
3207 struct qla2xxx_fw_dump *fw_dump;
a28d9e4e
JC
3208 dma_addr_t tc_dma;
3209 void *tc;
ad0a0b01
QT
3210
3211 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3212 req_q_size = rsp_q_size = 0;
3213
3214 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3215 fixed_size = sizeof(struct qla2100_fw_dump);
3216 } else if (IS_QLA23XX(ha)) {
3217 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3218 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3219 sizeof(uint16_t);
3220 } else if (IS_FWI2_CAPABLE(ha)) {
ecc89f25 3221 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
ad0a0b01
QT
3222 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3223 else if (IS_QLA81XX(ha))
3224 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3225 else if (IS_QLA25XX(ha))
3226 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3227 else
3228 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3229
3230 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3231 sizeof(uint32_t);
3232 if (ha->mqenable) {
ecc89f25
JC
3233 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
3234 !IS_QLA28XX(ha))
ad0a0b01
QT
3235 mq_size = sizeof(struct qla2xxx_mq_chain);
3236 /*
a4226ec3 3237 * Allocate maximum buffer size for all queues - Q0.
ad0a0b01
QT
3238 * Resizing must be done at end-of-dump processing.
3239 */
a4226ec3 3240 mq_size += (ha->max_req_queues - 1) *
ad0a0b01 3241 (req->length * sizeof(request_t));
a4226ec3 3242 mq_size += (ha->max_rsp_queues - 1) *
ad0a0b01
QT
3243 (rsp->length * sizeof(response_t));
3244 }
3245 if (ha->tgt.atio_ring)
3246 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3247 /* Allocate memory for Fibre Channel Event Buffer. */
3248 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
ecc89f25 3249 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
ad0a0b01
QT
3250 goto try_eft;
3251
3252 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3253try_eft:
a28d9e4e
JC
3254 if (ha->eft)
3255 dma_free_coherent(&ha->pdev->dev,
3256 EFT_SIZE, ha->eft, ha->eft_dma);
3257
3258 /* Allocate memory for Extended Trace Buffer. */
3259 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3260 GFP_KERNEL);
3261 if (!tc) {
3262 ql_log(ql_log_warn, vha, 0x00c1,
3263 "Unable to allocate (%d KB) for EFT.\n",
3264 EFT_SIZE / 1024);
3265 goto allocate;
3266 }
3267
3268 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3269 if (rval) {
3270 ql_log(ql_log_warn, vha, 0x00c2,
3271 "Unable to initialize EFT (%d).\n", rval);
3272 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
3273 tc_dma);
3274 }
ad0a0b01
QT
3275 ql_dbg(ql_dbg_init, vha, 0x00c3,
3276 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3277 eft_size = EFT_SIZE;
3278 }
3279
ecc89f25 3280 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
a28d9e4e
JC
3281 struct fwdt *fwdt = ha->fwdt;
3282 uint j;
3283
3284 for (j = 0; j < 2; j++, fwdt++) {
3285 if (!fwdt->template) {
3286 ql_log(ql_log_warn, vha, 0x00ba,
3287 "-> fwdt%u no template\n", j);
3288 continue;
3289 }
3290 ql_dbg(ql_dbg_init, vha, 0x00fa,
3291 "-> fwdt%u calculating fwdump size...\n", j);
3292 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3293 vha, fwdt->template);
3294 ql_dbg(ql_dbg_init, vha, 0x00fa,
3295 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3296 j, fwdt->dump_size);
3297 dump_size += fwdt->dump_size;
f73cb695 3298 }
f73cb695
CD
3299 goto allocate;
3300 }
3301
73208dfd
AC
3302 req_q_size = req->length * sizeof(request_t);
3303 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf 3304 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 3305 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
3306 ha->chain_offset = dump_size;
3307 dump_size += mq_size + fce_size;
d4e3e04d 3308
b945e777
QT
3309 if (ha->exchoffld_buf)
3310 dump_size += sizeof(struct qla2xxx_offld_chain) +
3311 ha->exchoffld_size;
3312 if (ha->exlogin_buf)
3313 dump_size += sizeof(struct qla2xxx_offld_chain) +
3314 ha->exlogin_size;
3315
f73cb695 3316allocate:
a4226ec3
QT
3317 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3318
3319 ql_dbg(ql_dbg_init, vha, 0x00c5,
3320 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3321 __func__, dump_size, ha->fw_dump_len,
3322 ha->fw_dump_alloc_len);
3323
ad0a0b01
QT
3324 fw_dump = vmalloc(dump_size);
3325 if (!fw_dump) {
3326 ql_log(ql_log_warn, vha, 0x00c4,
3327 "Unable to allocate (%d KB) for firmware dump.\n",
3328 dump_size / 1024);
3329 } else {
a6b95d1c 3330 mutex_lock(&ha->optrom_mutex);
a4226ec3
QT
3331 if (ha->fw_dumped) {
3332 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
ad0a0b01 3333 vfree(ha->fw_dump);
a4226ec3
QT
3334 ha->fw_dump = fw_dump;
3335 ha->fw_dump_alloc_len = dump_size;
3336 ql_dbg(ql_dbg_init, vha, 0x00c5,
3337 "Re-Allocated (%d KB) and save firmware dump.\n",
3338 dump_size / 1024);
3339 } else {
3340 if (ha->fw_dump)
3341 vfree(ha->fw_dump);
3342 ha->fw_dump = fw_dump;
3343
3344 ha->fw_dump_len = ha->fw_dump_alloc_len =
3345 dump_size;
3346 ql_dbg(ql_dbg_init, vha, 0x00c5,
3347 "Allocated (%d KB) for firmware dump.\n",
3348 dump_size / 1024);
3349
a6b95d1c
QT
3350 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3351 mutex_unlock(&ha->optrom_mutex);
a4226ec3 3352 return;
a6b95d1c 3353 }
a4226ec3
QT
3354
3355 ha->fw_dump->signature[0] = 'Q';
3356 ha->fw_dump->signature[1] = 'L';
3357 ha->fw_dump->signature[2] = 'G';
3358 ha->fw_dump->signature[3] = 'C';
3359 ha->fw_dump->version = htonl(1);
3360
3361 ha->fw_dump->fixed_size = htonl(fixed_size);
3362 ha->fw_dump->mem_size = htonl(mem_size);
3363 ha->fw_dump->req_q_size = htonl(req_q_size);
3364 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3365
3366 ha->fw_dump->eft_size = htonl(eft_size);
3367 ha->fw_dump->eft_addr_l =
3368 htonl(LSD(ha->eft_dma));
3369 ha->fw_dump->eft_addr_h =
3370 htonl(MSD(ha->eft_dma));
3371
3372 ha->fw_dump->header_size =
3373 htonl(offsetof
3374 (struct qla2xxx_fw_dump, isp));
3375 }
a6b95d1c 3376 mutex_unlock(&ha->optrom_mutex);
a7a167bf 3377 }
a7a167bf 3378 }
0107109e
AV
3379}
3380
18e7555a
AV
3381static int
3382qla81xx_mpi_sync(scsi_qla_host_t *vha)
3383{
3384#define MPS_MASK 0xe0
3385 int rval;
3386 uint16_t dc;
3387 uint32_t dw;
18e7555a
AV
3388
3389 if (!IS_QLA81XX(vha->hw))
3390 return QLA_SUCCESS;
3391
3392 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3393 if (rval != QLA_SUCCESS) {
7c3df132
SK
3394 ql_log(ql_log_warn, vha, 0x0105,
3395 "Unable to acquire semaphore.\n");
18e7555a
AV
3396 goto done;
3397 }
3398
3399 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3400 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3401 if (rval != QLA_SUCCESS) {
7c3df132 3402 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
3403 goto done_release;
3404 }
3405
3406 dc &= MPS_MASK;
3407 if (dc == (dw & MPS_MASK))
3408 goto done_release;
3409
3410 dw &= ~MPS_MASK;
3411 dw |= dc;
3412 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3413 if (rval != QLA_SUCCESS) {
7c3df132 3414 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
3415 }
3416
3417done_release:
3418 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3419 if (rval != QLA_SUCCESS) {
7c3df132
SK
3420 ql_log(ql_log_warn, vha, 0x006d,
3421 "Unable to release semaphore.\n");
18e7555a
AV
3422 }
3423
3424done:
3425 return rval;
3426}
3427
8d93f550
CD
3428int
3429qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3430{
3431 /* Don't try to reallocate the array */
3432 if (req->outstanding_cmds)
3433 return QLA_SUCCESS;
3434
d7459527 3435 if (!IS_FWI2_CAPABLE(ha))
8d93f550
CD
3436 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3437 else {
03e8c680
QT
3438 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3439 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
8d93f550 3440 else
03e8c680 3441 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
8d93f550
CD
3442 }
3443
6396bb22
KC
3444 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3445 sizeof(srb_t *),
3446 GFP_KERNEL);
8d93f550
CD
3447
3448 if (!req->outstanding_cmds) {
3449 /*
3450 * Try to allocate a minimal size just so we can get through
3451 * initialization.
3452 */
3453 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
6396bb22
KC
3454 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3455 sizeof(srb_t *),
3456 GFP_KERNEL);
8d93f550
CD
3457
3458 if (!req->outstanding_cmds) {
3459 ql_log(ql_log_fatal, NULL, 0x0126,
3460 "Failed to allocate memory for "
3461 "outstanding_cmds for req_que %p.\n", req);
3462 req->num_outstanding_cmds = 0;
3463 return QLA_FUNCTION_FAILED;
3464 }
3465 }
3466
3467 return QLA_SUCCESS;
3468}
3469
e4e3a2ce
QT
3470#define PRINT_FIELD(_field, _flag, _str) { \
3471 if (a0->_field & _flag) {\
3472 if (p) {\
3473 strcat(ptr, "|");\
3474 ptr++;\
3475 leftover--;\
3476 } \
3477 len = snprintf(ptr, leftover, "%s", _str); \
3478 p = 1;\
3479 leftover -= len;\
3480 ptr += len; \
3481 } \
3482}
3483
3484static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3485{
3486#define STR_LEN 64
3487 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3488 u8 str[STR_LEN], *ptr, p;
3489 int leftover, len;
3490
3491 memset(str, 0, STR_LEN);
3492 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3493 ql_dbg(ql_dbg_init, vha, 0x015a,
3494 "SFP MFG Name: %s\n", str);
3495
3496 memset(str, 0, STR_LEN);
3497 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3498 ql_dbg(ql_dbg_init, vha, 0x015c,
3499 "SFP Part Name: %s\n", str);
3500
3501 /* media */
3502 memset(str, 0, STR_LEN);
3503 ptr = str;
3504 leftover = STR_LEN;
3505 p = len = 0;
3506 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3507 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3508 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3509 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3510 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3511 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3512 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3513 ql_dbg(ql_dbg_init, vha, 0x0160,
3514 "SFP Media: %s\n", str);
3515
3516 /* link length */
3517 memset(str, 0, STR_LEN);
3518 ptr = str;
3519 leftover = STR_LEN;
3520 p = len = 0;
3521 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3522 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3523 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3524 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3525 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3526 ql_dbg(ql_dbg_init, vha, 0x0196,
3527 "SFP Link Length: %s\n", str);
3528
3529 memset(str, 0, STR_LEN);
3530 ptr = str;
3531 leftover = STR_LEN;
3532 p = len = 0;
3533 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3534 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3535 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3536 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3537 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3538 ql_dbg(ql_dbg_init, vha, 0x016e,
3539 "SFP FC Link Tech: %s\n", str);
3540
3541 if (a0->length_km)
3542 ql_dbg(ql_dbg_init, vha, 0x016f,
3543 "SFP Distant: %d km\n", a0->length_km);
3544 if (a0->length_100m)
3545 ql_dbg(ql_dbg_init, vha, 0x0170,
3546 "SFP Distant: %d m\n", a0->length_100m*100);
3547 if (a0->length_50um_10m)
3548 ql_dbg(ql_dbg_init, vha, 0x0189,
3549 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3550 if (a0->length_62um_10m)
3551 ql_dbg(ql_dbg_init, vha, 0x018a,
3552 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3553 if (a0->length_om4_10m)
3554 ql_dbg(ql_dbg_init, vha, 0x0194,
3555 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3556 if (a0->length_om3_10m)
3557 ql_dbg(ql_dbg_init, vha, 0x0195,
3558 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3559}
3560
3561
3562/*
3563 * Return Code:
3564 * QLA_SUCCESS: no action
3565 * QLA_INTERFACE_ERROR: SFP is not there.
3566 * QLA_FUNCTION_FAILED: detected New SFP
3567 */
3568int
3569qla24xx_detect_sfp(scsi_qla_host_t *vha)
3570{
3571 int rc = QLA_SUCCESS;
3572 struct sff_8247_a0 *a;
3573 struct qla_hw_data *ha = vha->hw;
3574
3575 if (!AUTO_DETECT_SFP_SUPPORT(vha))
3576 goto out;
3577
3578 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3579 if (rc)
3580 goto out;
3581
3582 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3583 qla2xxx_print_sfp_info(vha);
3584
3585 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
3586 /* long range */
3587 ha->flags.detected_lr_sfp = 1;
3588
3589 if (a->length_km > 5 || a->length_100m > 50)
3590 ha->long_range_distance = LR_DISTANCE_10K;
3591 else
3592 ha->long_range_distance = LR_DISTANCE_5K;
3593
3594 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3595 ql_dbg(ql_dbg_async, vha, 0x507b,
3596 "Detected Long Range SFP.\n");
3597 } else {
3598 /* short range */
3599 ha->flags.detected_lr_sfp = 0;
3600 if (ha->flags.using_lr_setting)
3601 ql_dbg(ql_dbg_async, vha, 0x5084,
3602 "Detected Short Range SFP.\n");
3603 }
3604
3605 if (!vha->flags.init_done)
3606 rc = QLA_SUCCESS;
3607out:
3608 return rc;
3609}
3610
1da177e4
LT
3611/**
3612 * qla2x00_setup_chip() - Load and start RISC firmware.
2db6228d 3613 * @vha: HA context
1da177e4
LT
3614 *
3615 * Returns 0 on success.
3616 */
3617static int
e315cd28 3618qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 3619{
0107109e
AV
3620 int rval;
3621 uint32_t srisc_address = 0;
e315cd28 3622 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
3623 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3624 unsigned long flags;
dda772e8 3625 uint16_t fw_major_version;
3db0652e 3626
7ec0effd 3627 if (IS_P3P_TYPE(ha)) {
a9083016 3628 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
3629 if (rval == QLA_SUCCESS) {
3630 qla2x00_stop_firmware(vha);
a9083016 3631 goto enable_82xx_npiv;
14e303d9 3632 } else
b963752f 3633 goto failed;
a9083016
GM
3634 }
3635
3db0652e
AV
3636 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3637 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3638 spin_lock_irqsave(&ha->hardware_lock, flags);
3639 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
3640 RD_REG_WORD(&reg->hccr);
3641 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3642 }
1da177e4 3643
18e7555a
AV
3644 qla81xx_mpi_sync(vha);
3645
1da177e4 3646 /* Load firmware sequences */
e315cd28 3647 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 3648 if (rval == QLA_SUCCESS) {
7c3df132
SK
3649 ql_dbg(ql_dbg_init, vha, 0x00c9,
3650 "Verifying Checksum of loaded RISC code.\n");
1da177e4 3651
e315cd28 3652 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
3653 if (rval == QLA_SUCCESS) {
3654 /* Start firmware execution. */
7c3df132
SK
3655 ql_dbg(ql_dbg_init, vha, 0x00ca,
3656 "Starting firmware.\n");
1da177e4 3657
b0d6cabd
HM
3658 if (ql2xexlogins)
3659 ha->flags.exlogins_enabled = 1;
3660
99e1b683 3661 if (qla_is_exch_offld_enabled(vha))
2f56a7f1
HM
3662 ha->flags.exchoffld_enabled = 1;
3663
e315cd28 3664 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 3665 /* Retrieve firmware information. */
dda772e8 3666 if (rval == QLA_SUCCESS) {
e4e3a2ce
QT
3667 qla24xx_detect_sfp(vha);
3668
ecc89f25
JC
3669 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3670 IS_QLA28XX(ha)) &&
8b4673ba
QT
3671 (ha->zio_mode == QLA_ZIO_MODE_6))
3672 qla27xx_set_zio_threshold(vha,
3673 ha->last_zio_threshold);
3674
b0d6cabd
HM
3675 rval = qla2x00_set_exlogins_buffer(vha);
3676 if (rval != QLA_SUCCESS)
3677 goto failed;
3678
2f56a7f1
HM
3679 rval = qla2x00_set_exchoffld_buffer(vha);
3680 if (rval != QLA_SUCCESS)
3681 goto failed;
3682
a9083016 3683enable_82xx_npiv:
dda772e8 3684 fw_major_version = ha->fw_major_version;
7ec0effd 3685 if (IS_P3P_TYPE(ha))
3173167f 3686 qla82xx_check_md_needed(vha);
6246b8a1
GM
3687 else
3688 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
3689 if (rval != QLA_SUCCESS)
3690 goto failed;
2c3dfe3f 3691 ha->flags.npiv_supported = 0;
e315cd28 3692 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 3693 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 3694 ha->flags.npiv_supported = 1;
4d0ea247
SJ
3695 if ((!ha->max_npiv_vports) ||
3696 ((ha->max_npiv_vports + 1) %
eb66dc60 3697 MIN_MULTI_ID_FABRIC))
4d0ea247 3698 ha->max_npiv_vports =
eb66dc60 3699 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 3700 }
03e8c680 3701 qla2x00_get_resource_cnts(vha);
d743de66 3702
8d93f550
CD
3703 /*
3704 * Allocate the array of outstanding commands
3705 * now that we know the firmware resources.
3706 */
3707 rval = qla2x00_alloc_outstanding_cmds(ha,
3708 vha->req);
3709 if (rval != QLA_SUCCESS)
3710 goto failed;
3711
ad0a0b01
QT
3712 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
3713 qla2x00_alloc_offload_mem(vha);
3714
3715 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
08de2844 3716 qla2x00_alloc_fw_dump(vha);
ad0a0b01 3717
3b6e5b9d
CD
3718 } else {
3719 goto failed;
1da177e4
LT
3720 }
3721 } else {
7c3df132
SK
3722 ql_log(ql_log_fatal, vha, 0x00cd,
3723 "ISP Firmware failed checksum.\n");
3724 goto failed;
1da177e4 3725 }
c74d88a4
AV
3726 } else
3727 goto failed;
1da177e4 3728
3db0652e
AV
3729 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3730 /* Enable proper parity. */
3731 spin_lock_irqsave(&ha->hardware_lock, flags);
3732 if (IS_QLA2300(ha))
3733 /* SRAM parity */
3734 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
3735 else
3736 /* SRAM, Instruction RAM and GP RAM parity */
3737 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
3738 RD_REG_WORD(&reg->hccr);
3739 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3740 }
3741
ecc89f25 3742 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
f3982d89
CD
3743 ha->flags.fac_supported = 1;
3744 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1d2874de
JC
3745 uint32_t size;
3746
3747 rval = qla81xx_fac_get_sector_size(vha, &size);
3748 if (rval == QLA_SUCCESS) {
3749 ha->flags.fac_supported = 1;
3750 ha->fdt_block_size = size << 2;
3751 } else {
7c3df132 3752 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
3753 "Unsupported FAC firmware (%d.%02d.%02d).\n",
3754 ha->fw_major_version, ha->fw_minor_version,
3755 ha->fw_subminor_version);
1ca60e3b 3756
ecc89f25
JC
3757 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3758 IS_QLA28XX(ha)) {
6246b8a1
GM
3759 ha->flags.fac_supported = 0;
3760 rval = QLA_SUCCESS;
3761 }
1d2874de
JC
3762 }
3763 }
ca9e9c3e 3764failed:
1da177e4 3765 if (rval) {
7c3df132
SK
3766 ql_log(ql_log_fatal, vha, 0x00cf,
3767 "Setup chip ****FAILED****.\n");
1da177e4
LT
3768 }
3769
3770 return (rval);
3771}
3772
3773/**
3774 * qla2x00_init_response_q_entries() - Initializes response queue entries.
2db6228d 3775 * @rsp: response queue
1da177e4
LT
3776 *
3777 * Beginning of request ring has initialization control block already built
3778 * by nvram config routine.
3779 *
3780 * Returns 0 on success.
3781 */
73208dfd
AC
3782void
3783qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
3784{
3785 uint16_t cnt;
3786 response_t *pkt;
3787
2afa19a9
AC
3788 rsp->ring_ptr = rsp->ring;
3789 rsp->ring_index = 0;
3790 rsp->status_srb = NULL;
e315cd28
AC
3791 pkt = rsp->ring_ptr;
3792 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
3793 pkt->signature = RESPONSE_PROCESSED;
3794 pkt++;
3795 }
1da177e4
LT
3796}
3797
3798/**
3799 * qla2x00_update_fw_options() - Read and process firmware options.
2db6228d 3800 * @vha: HA context
1da177e4
LT
3801 *
3802 * Returns 0 on success.
3803 */
abbd8870 3804void
e315cd28 3805qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
3806{
3807 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 3808 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3809
3810 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 3811 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
3812
3813 if (IS_QLA2100(ha) || IS_QLA2200(ha))
3814 return;
3815
3816 /* Serial Link options. */
7c3df132
SK
3817 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
3818 "Serial link options.\n");
3819 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
f8f97b0c 3820 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
1da177e4
LT
3821
3822 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
3823 if (ha->fw_seriallink_options[3] & BIT_2) {
3824 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
3825
3826 /* 1G settings */
3827 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
3828 emphasis = (ha->fw_seriallink_options[2] &
3829 (BIT_4 | BIT_3)) >> 3;
3830 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 3831 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3832 rx_sens = (ha->fw_seriallink_options[0] &
3833 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3834 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
3835 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3836 if (rx_sens == 0x0)
3837 rx_sens = 0x3;
3838 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
3839 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3840 ha->fw_options[10] |= BIT_5 |
3841 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3842 (tx_sens & (BIT_1 | BIT_0));
3843
3844 /* 2G settings */
3845 swing = (ha->fw_seriallink_options[2] &
3846 (BIT_7 | BIT_6 | BIT_5)) >> 5;
3847 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
3848 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 3849 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3850 rx_sens = (ha->fw_seriallink_options[1] &
3851 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3852 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
3853 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3854 if (rx_sens == 0x0)
3855 rx_sens = 0x3;
3856 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
3857 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3858 ha->fw_options[11] |= BIT_5 |
3859 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3860 (tx_sens & (BIT_1 | BIT_0));
3861 }
3862
3863 /* FCP2 options. */
3864 /* Return command IOCBs without waiting for an ABTS to complete. */
3865 ha->fw_options[3] |= BIT_13;
3866
3867 /* LED scheme. */
3868 if (ha->flags.enable_led_scheme)
3869 ha->fw_options[2] |= BIT_12;
3870
48c02fde 3871 /* Detect ISP6312. */
3872 if (IS_QLA6312(ha))
3873 ha->fw_options[2] |= BIT_13;
3874
088d09d4
GM
3875 /* Set Retry FLOGI in case of P2P connection */
3876 if (ha->operating_mode == P2P) {
3877 ha->fw_options[2] |= BIT_3;
3878 ql_dbg(ql_dbg_disc, vha, 0x2100,
3879 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3880 __func__, ha->fw_options[2]);
3881 }
3882
1da177e4 3883 /* Update firmware options. */
e315cd28 3884 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
3885}
3886
0107109e 3887void
e315cd28 3888qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
3889{
3890 int rval;
e315cd28 3891 struct qla_hw_data *ha = vha->hw;
0107109e 3892
7ec0effd 3893 if (IS_P3P_TYPE(ha))
a9083016
GM
3894 return;
3895
f198cafa
HM
3896 /* Hold status IOCBs until ABTS response received. */
3897 if (ql2xfwholdabts)
3898 ha->fw_options[3] |= BIT_12;
3899
088d09d4
GM
3900 /* Set Retry FLOGI in case of P2P connection */
3901 if (ha->operating_mode == P2P) {
3902 ha->fw_options[2] |= BIT_3;
3903 ql_dbg(ql_dbg_disc, vha, 0x2101,
3904 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3905 __func__, ha->fw_options[2]);
3906 }
3907
41dc529a 3908 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
3c4810ff 3909 if (ql2xmvasynctoatio &&
ecc89f25 3910 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
41dc529a
QT
3911 if (qla_tgt_mode_enabled(vha) ||
3912 qla_dual_mode_enabled(vha))
3913 ha->fw_options[2] |= BIT_11;
3914 else
3915 ha->fw_options[2] &= ~BIT_11;
3916 }
3917
ecc89f25
JC
3918 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3919 IS_QLA28XX(ha)) {
f7e761f5
QT
3920 /*
3921 * Tell FW to track each exchange to prevent
3922 * driver from using stale exchange.
3923 */
3924 if (qla_tgt_mode_enabled(vha) ||
3925 qla_dual_mode_enabled(vha))
3926 ha->fw_options[2] |= BIT_4;
3927 else
3928 ha->fw_options[2] &= ~BIT_4;
9ecf0b0d
QT
3929
3930 /* Reserve 1/2 of emergency exchanges for ELS.*/
3931 if (qla2xuseresexchforels)
3932 ha->fw_options[2] |= BIT_8;
3933 else
3934 ha->fw_options[2] &= ~BIT_8;
f7e761f5
QT
3935 }
3936
83548fe2
QT
3937 ql_dbg(ql_dbg_init, vha, 0x00e8,
3938 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
3939 __func__, ha->fw_options[1], ha->fw_options[2],
3940 ha->fw_options[3], vha->host->active_mode);
3c4810ff
QT
3941
3942 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
3943 qla2x00_set_fw_options(vha, ha->fw_options);
41dc529a 3944
0107109e 3945 /* Update Serial Link options. */
f94097ed 3946 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
3947 return;
3948
e315cd28 3949 rval = qla2x00_set_serdes_params(vha,
f94097ed 3950 le16_to_cpu(ha->fw_seriallink_options24[1]),
3951 le16_to_cpu(ha->fw_seriallink_options24[2]),
3952 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 3953 if (rval != QLA_SUCCESS) {
7c3df132 3954 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
3955 "Unable to update Serial Link options (%x).\n", rval);
3956 }
3957}
3958
abbd8870 3959void
e315cd28 3960qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 3961{
e315cd28 3962 struct qla_hw_data *ha = vha->hw;
3d71644c 3963 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
3964 struct req_que *req = ha->req_q_map[0];
3965 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
3966
3967 /* Setup ring parameters in initialization control block. */
ad950360
BVA
3968 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
3969 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3970 ha->init_cb->request_q_length = cpu_to_le16(req->length);
3971 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
d4556a49
BVA
3972 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
3973 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
abbd8870
AV
3974
3975 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
3976 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
3977 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
3978 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
3979 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
3980}
3981
0107109e 3982void
e315cd28 3983qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 3984{
e315cd28 3985 struct qla_hw_data *ha = vha->hw;
118e2ef9 3986 device_reg_t *reg = ISP_QUE_REG(ha, 0);
73208dfd
AC
3987 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3988 struct qla_msix_entry *msix;
0107109e 3989 struct init_cb_24xx *icb;
73208dfd
AC
3990 uint16_t rid = 0;
3991 struct req_que *req = ha->req_q_map[0];
3992 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 3993
6246b8a1 3994 /* Setup ring parameters in initialization control block. */
0107109e 3995 icb = (struct init_cb_24xx *)ha->init_cb;
ad950360
BVA
3996 icb->request_q_outpointer = cpu_to_le16(0);
3997 icb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3998 icb->request_q_length = cpu_to_le16(req->length);
3999 icb->response_q_length = cpu_to_le16(rsp->length);
d4556a49
BVA
4000 put_unaligned_le64(req->dma, &icb->request_q_address);
4001 put_unaligned_le64(rsp->dma, &icb->response_q_address);
0107109e 4002
2d70c103 4003 /* Setup ATIO queue dma pointers for target mode */
ad950360 4004 icb->atio_q_inpointer = cpu_to_le16(0);
2d70c103 4005 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
d4556a49 4006 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
2d70c103 4007
7c6300e3 4008 if (IS_SHADOW_REG_CAPABLE(ha))
ad950360 4009 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
7c6300e3 4010
ecc89f25
JC
4011 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4012 IS_QLA28XX(ha)) {
ad950360
BVA
4013 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
4014 icb->rid = cpu_to_le16(rid);
73208dfd
AC
4015 if (ha->flags.msix_enabled) {
4016 msix = &ha->msix_entries[1];
83548fe2 4017 ql_dbg(ql_dbg_init, vha, 0x0019,
7c3df132
SK
4018 "Registering vector 0x%x for base que.\n",
4019 msix->entry);
73208dfd
AC
4020 icb->msix = cpu_to_le16(msix->entry);
4021 }
4022 /* Use alternate PCI bus number */
4023 if (MSB(rid))
ad950360 4024 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
73208dfd
AC
4025 /* Use alternate PCI devfn */
4026 if (LSB(rid))
ad950360 4027 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
73208dfd 4028
3155754a 4029 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
4030 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4031 (ha->flags.msix_enabled)) {
ad950360 4032 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3155754a 4033 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
4034 ql_dbg(ql_dbg_init, vha, 0x00fe,
4035 "MSIX Handshake Disable Mode turned on.\n");
3155754a 4036 } else {
ad950360 4037 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3155754a 4038 }
ad950360 4039 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
73208dfd
AC
4040
4041 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
4042 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
4043 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
4044 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
4045 } else {
4046 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
4047 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
4048 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
4049 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
4050 }
4910b524 4051
aa230bc5 4052 qlt_24xx_config_rings(vha);
2d70c103 4053
4910b524
AG
4054 /* If the user has configured the speed, set it here */
4055 if (ha->set_data_rate) {
4056 ql_dbg(ql_dbg_init, vha, 0x00fd,
4057 "Speed set by user : %s Gbps \n",
4058 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4059 icb->firmware_options_3 = (ha->set_data_rate << 13);
4060 }
4061
73208dfd
AC
4062 /* PCI posting */
4063 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
4064}
4065
1da177e4
LT
4066/**
4067 * qla2x00_init_rings() - Initializes firmware.
2db6228d 4068 * @vha: HA context
1da177e4
LT
4069 *
4070 * Beginning of request ring has initialization control block already built
4071 * by nvram config routine.
4072 *
4073 * Returns 0 on success.
4074 */
8ae6d9c7 4075int
e315cd28 4076qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
4077{
4078 int rval;
4079 unsigned long flags = 0;
29bdccbe 4080 int cnt, que;
e315cd28 4081 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
4082 struct req_que *req;
4083 struct rsp_que *rsp;
2c3dfe3f
SJ
4084 struct mid_init_cb_24xx *mid_init_cb =
4085 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
4086
4087 spin_lock_irqsave(&ha->hardware_lock, flags);
4088
4089 /* Clear outstanding commands array. */
2afa19a9 4090 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 4091 req = ha->req_q_map[que];
cb43285f 4092 if (!req || !test_bit(que, ha->req_qid_map))
29bdccbe 4093 continue;
7c6300e3
JC
4094 req->out_ptr = (void *)(req->ring + req->length);
4095 *req->out_ptr = 0;
8d93f550 4096 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 4097 req->outstanding_cmds[cnt] = NULL;
1da177e4 4098
2afa19a9 4099 req->current_outstanding_cmd = 1;
1da177e4 4100
29bdccbe
AC
4101 /* Initialize firmware. */
4102 req->ring_ptr = req->ring;
4103 req->ring_index = 0;
4104 req->cnt = req->length;
4105 }
1da177e4 4106
2afa19a9 4107 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe 4108 rsp = ha->rsp_q_map[que];
cb43285f 4109 if (!rsp || !test_bit(que, ha->rsp_qid_map))
29bdccbe 4110 continue;
7c6300e3
JC
4111 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
4112 *rsp->in_ptr = 0;
29bdccbe 4113 /* Initialize response queue entries */
8ae6d9c7
GM
4114 if (IS_QLAFX00(ha))
4115 qlafx00_init_response_q_entries(rsp);
4116 else
4117 qla2x00_init_response_q_entries(rsp);
29bdccbe 4118 }
1da177e4 4119
2d70c103
NB
4120 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4121 ha->tgt.atio_ring_index = 0;
4122 /* Initialize ATIO queue entries */
4123 qlt_init_atio_q_entries(vha);
4124
e315cd28 4125 ha->isp_ops->config_rings(vha);
1da177e4
LT
4126
4127 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4128
8ae6d9c7
GM
4129 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
4130
4131 if (IS_QLAFX00(ha)) {
4132 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4133 goto next_check;
4134 }
4135
1da177e4 4136 /* Update any ISP specific firmware options before initialization. */
e315cd28 4137 ha->isp_ops->update_fw_options(vha);
1da177e4 4138
605aa2bc 4139 if (ha->flags.npiv_supported) {
45980cc2 4140 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 4141 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 4142 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
4143 }
4144
24a08138 4145 if (IS_FWI2_CAPABLE(ha)) {
ad950360 4146 mid_init_cb->options = cpu_to_le16(BIT_1);
24a08138 4147 mid_init_cb->init_cb.execution_throttle =
03e8c680 4148 cpu_to_le16(ha->cur_fw_xcb_count);
40f3862b
JC
4149 ha->flags.dport_enabled =
4150 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
4151 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4152 (ha->flags.dport_enabled) ? "enabled" : "disabled");
4153 /* FA-WWPN Status */
2486c627 4154 ha->flags.fawwpn_enabled =
40f3862b 4155 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
83548fe2 4156 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
2486c627 4157 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
24a08138 4158 }
2c3dfe3f 4159
e315cd28 4160 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 4161next_check:
1da177e4 4162 if (rval) {
7c3df132
SK
4163 ql_log(ql_log_fatal, vha, 0x00d2,
4164 "Init Firmware **** FAILED ****.\n");
1da177e4 4165 } else {
7c3df132
SK
4166 ql_dbg(ql_dbg_init, vha, 0x00d3,
4167 "Init Firmware -- success.\n");
4b60c827 4168 QLA_FW_STARTED(ha);
0645cb83 4169 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
1da177e4
LT
4170 }
4171
4172 return (rval);
4173}
4174
4175/**
4176 * qla2x00_fw_ready() - Waits for firmware ready.
2db6228d 4177 * @vha: HA context
1da177e4
LT
4178 *
4179 * Returns 0 on success.
4180 */
4181static int
e315cd28 4182qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
4183{
4184 int rval;
4d4df193 4185 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
4186 uint16_t min_wait; /* Minimum wait time if loop is down */
4187 uint16_t wait_time; /* Wait time if loop is coming ready */
b5a340dd 4188 uint16_t state[6];
e315cd28 4189 struct qla_hw_data *ha = vha->hw;
1da177e4 4190
8ae6d9c7
GM
4191 if (IS_QLAFX00(vha->hw))
4192 return qlafx00_fw_ready(vha);
4193
1da177e4
LT
4194 rval = QLA_SUCCESS;
4195
33461491
CD
4196 /* Time to wait for loop down */
4197 if (IS_P3P_TYPE(ha))
4198 min_wait = 30;
4199 else
4200 min_wait = 20;
1da177e4
LT
4201
4202 /*
4203 * Firmware should take at most one RATOV to login, plus 5 seconds for
4204 * our own processing.
4205 */
4206 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4207 wait_time = min_wait;
4208 }
4209
4210 /* Min wait time if loop down */
4211 mtime = jiffies + (min_wait * HZ);
4212
4213 /* wait time before firmware ready */
4214 wtime = jiffies + (wait_time * HZ);
4215
4216 /* Wait for ISP to finish LIP */
e315cd28 4217 if (!vha->flags.init_done)
7c3df132
SK
4218 ql_log(ql_log_info, vha, 0x801e,
4219 "Waiting for LIP to complete.\n");
1da177e4
LT
4220
4221 do {
5b939038 4222 memset(state, -1, sizeof(state));
e315cd28 4223 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 4224 if (rval == QLA_SUCCESS) {
4d4df193 4225 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 4226 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 4227 }
4d4df193 4228 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
4229 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4230 "fw_state=%x 84xx=%x.\n", state[0],
4231 state[2]);
4d4df193
HK
4232 if ((state[2] & FSTATE_LOGGED_IN) &&
4233 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
4234 ql_dbg(ql_dbg_taskm, vha, 0x8028,
4235 "Sending verify iocb.\n");
4d4df193
HK
4236
4237 cs84xx_time = jiffies;
e315cd28 4238 rval = qla84xx_init_chip(vha);
7c3df132
SK
4239 if (rval != QLA_SUCCESS) {
4240 ql_log(ql_log_warn,
cfb0919c 4241 vha, 0x8007,
7c3df132 4242 "Init chip failed.\n");
4d4df193 4243 break;
7c3df132 4244 }
4d4df193
HK
4245
4246 /* Add time taken to initialize. */
4247 cs84xx_time = jiffies - cs84xx_time;
4248 wtime += cs84xx_time;
4249 mtime += cs84xx_time;
cfb0919c 4250 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
4251 "Increasing wait time by %ld. "
4252 "New time %ld.\n", cs84xx_time,
4253 wtime);
4d4df193
HK
4254 }
4255 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
4256 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4257 "F/W Ready - OK.\n");
1da177e4 4258
e315cd28 4259 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
4260 &ha->login_timeout, &ha->r_a_tov);
4261
4262 rval = QLA_SUCCESS;
4263 break;
4264 }
4265
4266 rval = QLA_FUNCTION_FAILED;
4267
e315cd28 4268 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 4269 state[0] != FSTATE_READY) {
1da177e4 4270 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
4271 * other than Wait for Login.
4272 */
1da177e4 4273 if (time_after_eq(jiffies, mtime)) {
7c3df132 4274 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
4275 "Cable is unplugged...\n");
4276
e315cd28 4277 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
4278 break;
4279 }
4280 }
4281 } else {
4282 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 4283 if (time_after_eq(jiffies, mtime) ||
7190575f 4284 ha->flags.isp82xx_fw_hung)
1da177e4
LT
4285 break;
4286 }
4287
4288 if (time_after_eq(jiffies, wtime))
4289 break;
4290
4291 /* Delay for a while */
4292 msleep(500);
1da177e4
LT
4293 } while (1);
4294
7c3df132 4295 ql_dbg(ql_dbg_taskm, vha, 0x803a,
b5a340dd
JC
4296 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4297 state[1], state[2], state[3], state[4], state[5], jiffies);
1da177e4 4298
cfb0919c 4299 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
4300 ql_log(ql_log_warn, vha, 0x803b,
4301 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
4302 }
4303
4304 return (rval);
4305}
4306
4307/*
4308* qla2x00_configure_hba
4309* Setup adapter context.
4310*
4311* Input:
4312* ha = adapter state pointer.
4313*
4314* Returns:
4315* 0 = success
4316*
4317* Context:
4318* Kernel context.
4319*/
4320static int
e315cd28 4321qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
4322{
4323 int rval;
4324 uint16_t loop_id;
4325 uint16_t topo;
2c3dfe3f 4326 uint16_t sw_cap;
1da177e4
LT
4327 uint8_t al_pa;
4328 uint8_t area;
4329 uint8_t domain;
4330 char connect_type[22];
e315cd28 4331 struct qla_hw_data *ha = vha->hw;
61e1b269 4332 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
482c9dc7 4333 port_id_t id;
9d1aa4e1 4334 unsigned long flags;
1da177e4
LT
4335
4336 /* Get host addresses. */
e315cd28 4337 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 4338 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 4339 if (rval != QLA_SUCCESS) {
e315cd28 4340 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 4341 IS_CNA_CAPABLE(ha) ||
33135aa2 4342 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
4343 ql_dbg(ql_dbg_disc, vha, 0x2008,
4344 "Loop is in a transition state.\n");
33135aa2 4345 } else {
7c3df132
SK
4346 ql_log(ql_log_warn, vha, 0x2009,
4347 "Unable to get host loop ID.\n");
61e1b269
JC
4348 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4349 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4350 ql_log(ql_log_warn, vha, 0x1151,
4351 "Doing link init.\n");
4352 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
4353 return rval;
4354 }
e315cd28 4355 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 4356 }
1da177e4
LT
4357 return (rval);
4358 }
4359
4360 if (topo == 4) {
7c3df132
SK
4361 ql_log(ql_log_info, vha, 0x200a,
4362 "Cannot get topology - retrying.\n");
1da177e4
LT
4363 return (QLA_FUNCTION_FAILED);
4364 }
4365
e315cd28 4366 vha->loop_id = loop_id;
1da177e4
LT
4367
4368 /* initialize */
4369 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4370 ha->operating_mode = LOOP;
2c3dfe3f 4371 ha->switch_cap = 0;
1da177e4
LT
4372
4373 switch (topo) {
4374 case 0:
7c3df132 4375 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
4376 ha->current_topology = ISP_CFG_NL;
4377 strcpy(connect_type, "(Loop)");
4378 break;
4379
4380 case 1:
7c3df132 4381 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 4382 ha->switch_cap = sw_cap;
1da177e4
LT
4383 ha->current_topology = ISP_CFG_FL;
4384 strcpy(connect_type, "(FL_Port)");
4385 break;
4386
4387 case 2:
7c3df132 4388 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
4389 ha->operating_mode = P2P;
4390 ha->current_topology = ISP_CFG_N;
4391 strcpy(connect_type, "(N_Port-to-N_Port)");
4392 break;
4393
4394 case 3:
7c3df132 4395 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 4396 ha->switch_cap = sw_cap;
1da177e4
LT
4397 ha->operating_mode = P2P;
4398 ha->current_topology = ISP_CFG_F;
4399 strcpy(connect_type, "(F_Port)");
4400 break;
4401
4402 default:
7c3df132
SK
4403 ql_dbg(ql_dbg_disc, vha, 0x200f,
4404 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
4405 ha->current_topology = ISP_CFG_NL;
4406 strcpy(connect_type, "(Loop)");
4407 break;
4408 }
4409
4410 /* Save Host port and loop ID. */
4411 /* byte order - Big Endian */
482c9dc7
QT
4412 id.b.domain = domain;
4413 id.b.area = area;
4414 id.b.al_pa = al_pa;
4415 id.b.rsvd_1 = 0;
9d1aa4e1 4416 spin_lock_irqsave(&ha->hardware_lock, flags);
8777e431
QT
4417 if (!(topo == 2 && ha->flags.n2n_bigger))
4418 qlt_update_host_map(vha, id);
9d1aa4e1 4419 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103 4420
e315cd28 4421 if (!vha->flags.init_done)
7c3df132
SK
4422 ql_log(ql_log_info, vha, 0x2010,
4423 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 4424 connect_type, vha->loop_id);
1da177e4 4425
1da177e4
LT
4426 return(rval);
4427}
4428
a9083016 4429inline void
e315cd28
AC
4430qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4431 char *def)
9bb9fcf2
AV
4432{
4433 char *st, *en;
4434 uint16_t index;
a28d9e4e 4435 uint64_t zero[2] = { 0 };
e315cd28 4436 struct qla_hw_data *ha = vha->hw;
ab671149 4437 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 4438 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2 4439
a28d9e4e
JC
4440 if (len > sizeof(zero))
4441 len = sizeof(zero);
4442 if (memcmp(model, &zero, len) != 0) {
9bb9fcf2
AV
4443 strncpy(ha->model_number, model, len);
4444 st = en = ha->model_number;
4445 en += len - 1;
4446 while (en > st) {
4447 if (*en != 0x20 && *en != 0x00)
4448 break;
4449 *en-- = '\0';
4450 }
4451
4452 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
4453 if (use_tbl &&
4454 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 4455 index < QLA_MODEL_NAMES)
1ee27146
JC
4456 strncpy(ha->model_desc,
4457 qla2x00_model_name[index * 2 + 1],
4458 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
4459 } else {
4460 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
4461 if (use_tbl &&
4462 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
4463 index < QLA_MODEL_NAMES) {
4464 strcpy(ha->model_number,
4465 qla2x00_model_name[index * 2]);
1ee27146
JC
4466 strncpy(ha->model_desc,
4467 qla2x00_model_name[index * 2 + 1],
4468 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
4469 } else {
4470 strcpy(ha->model_number, def);
4471 }
4472 }
1ee27146 4473 if (IS_FWI2_CAPABLE(ha))
e315cd28 4474 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 4475 sizeof(ha->model_desc));
9bb9fcf2
AV
4476}
4477
4e08df3f
DM
4478/* On sparc systems, obtain port and node WWN from firmware
4479 * properties.
4480 */
e315cd28 4481static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
4482{
4483#ifdef CONFIG_SPARC
e315cd28 4484 struct qla_hw_data *ha = vha->hw;
4e08df3f 4485 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
4486 struct device_node *dp = pci_device_to_OF_node(pdev);
4487 const u8 *val;
4e08df3f
DM
4488 int len;
4489
4490 val = of_get_property(dp, "port-wwn", &len);
4491 if (val && len >= WWN_SIZE)
4492 memcpy(nv->port_name, val, WWN_SIZE);
4493
4494 val = of_get_property(dp, "node-wwn", &len);
4495 if (val && len >= WWN_SIZE)
4496 memcpy(nv->node_name, val, WWN_SIZE);
4497#endif
4498}
4499
1da177e4
LT
4500/*
4501* NVRAM configuration for ISP 2xxx
4502*
4503* Input:
4504* ha = adapter block pointer.
4505*
4506* Output:
4507* initialization control block in response_ring
4508* host adapters parameters in host adapter block
4509*
4510* Returns:
4511* 0 = success.
4512*/
abbd8870 4513int
e315cd28 4514qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 4515{
4e08df3f 4516 int rval;
0107109e
AV
4517 uint8_t chksum = 0;
4518 uint16_t cnt;
4519 uint8_t *dptr1, *dptr2;
e315cd28 4520 struct qla_hw_data *ha = vha->hw;
0107109e 4521 init_cb_t *icb = ha->init_cb;
281afe19
SJ
4522 nvram_t *nv = ha->nvram;
4523 uint8_t *ptr = ha->nvram;
3d71644c 4524 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 4525
4e08df3f
DM
4526 rval = QLA_SUCCESS;
4527
1da177e4 4528 /* Determine NVRAM starting address. */
f8f97b0c 4529 ha->nvram_size = sizeof(*nv);
1da177e4
LT
4530 ha->nvram_base = 0;
4531 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4532 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
4533 ha->nvram_base = 0x80;
4534
4535 /* Get NVRAM data and calculate checksum. */
e315cd28 4536 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
4537 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4538 chksum += *ptr++;
1da177e4 4539
7c3df132
SK
4540 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4541 "Contents of NVRAM.\n");
4542 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
f8f97b0c 4543 nv, ha->nvram_size);
1da177e4
LT
4544
4545 /* Bad NVRAM data, set defaults parameters. */
a28d9e4e
JC
4546 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
4547 nv->nvram_version < 1) {
1da177e4 4548 /* Reset NVRAM data. */
7c3df132 4549 ql_log(ql_log_warn, vha, 0x0064,
3695310e
JC
4550 "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
4551 chksum, nv->id, nv->nvram_version);
7c3df132
SK
4552 ql_log(ql_log_warn, vha, 0x0065,
4553 "Falling back to "
4554 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
4555
4556 /*
4557 * Set default initialization control block.
4558 */
4559 memset(nv, 0, ha->nvram_size);
4560 nv->parameter_block_version = ICB_VERSION;
4561
4562 if (IS_QLA23XX(ha)) {
4563 nv->firmware_options[0] = BIT_2 | BIT_1;
4564 nv->firmware_options[1] = BIT_7 | BIT_5;
4565 nv->add_firmware_options[0] = BIT_5;
4566 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 4567 nv->frame_payload_size = 2048;
4e08df3f
DM
4568 nv->special_options[1] = BIT_7;
4569 } else if (IS_QLA2200(ha)) {
4570 nv->firmware_options[0] = BIT_2 | BIT_1;
4571 nv->firmware_options[1] = BIT_7 | BIT_5;
4572 nv->add_firmware_options[0] = BIT_5;
4573 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 4574 nv->frame_payload_size = 1024;
4e08df3f
DM
4575 } else if (IS_QLA2100(ha)) {
4576 nv->firmware_options[0] = BIT_3 | BIT_1;
4577 nv->firmware_options[1] = BIT_5;
98aee70d 4578 nv->frame_payload_size = 1024;
4e08df3f
DM
4579 }
4580
ad950360
BVA
4581 nv->max_iocb_allocation = cpu_to_le16(256);
4582 nv->execution_throttle = cpu_to_le16(16);
4e08df3f
DM
4583 nv->retry_count = 8;
4584 nv->retry_delay = 1;
4585
4586 nv->port_name[0] = 33;
4587 nv->port_name[3] = 224;
4588 nv->port_name[4] = 139;
4589
e315cd28 4590 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
4591
4592 nv->login_timeout = 4;
4593
4594 /*
4595 * Set default host adapter parameters
4596 */
4597 nv->host_p[1] = BIT_2;
4598 nv->reset_delay = 5;
4599 nv->port_down_retry_count = 8;
ad950360 4600 nv->max_luns_per_target = cpu_to_le16(8);
4e08df3f
DM
4601 nv->link_down_timeout = 60;
4602
4603 rval = 1;
1da177e4
LT
4604 }
4605
4606#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
4607 /*
4608 * The SN2 does not provide BIOS emulation which means you can't change
4609 * potentially bogus BIOS settings. Force the use of default settings
4610 * for link rate and frame size. Hope that the rest of the settings
4611 * are valid.
4612 */
4613 if (ia64_platform_is("sn2")) {
98aee70d 4614 nv->frame_payload_size = 2048;
1da177e4
LT
4615 if (IS_QLA23XX(ha))
4616 nv->special_options[1] = BIT_7;
4617 }
4618#endif
4619
4620 /* Reset Initialization control block */
0107109e 4621 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
4622
4623 /*
4624 * Setup driver NVRAM options.
4625 */
4626 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4627 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4628 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4629 nv->firmware_options[1] &= ~BIT_4;
4630
4631 if (IS_QLA23XX(ha)) {
4632 nv->firmware_options[0] |= BIT_2;
4633 nv->firmware_options[0] &= ~BIT_3;
2d70c103 4634 nv->special_options[0] &= ~BIT_6;
0107109e 4635 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
4636
4637 if (IS_QLA2300(ha)) {
4638 if (ha->fb_rev == FPM_2310) {
4639 strcpy(ha->model_number, "QLA2310");
4640 } else {
4641 strcpy(ha->model_number, "QLA2300");
4642 }
4643 } else {
e315cd28 4644 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 4645 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
4646 }
4647 } else if (IS_QLA2200(ha)) {
4648 nv->firmware_options[0] |= BIT_2;
4649 /*
4650 * 'Point-to-point preferred, else loop' is not a safe
4651 * connection mode setting.
4652 */
4653 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
4654 (BIT_5 | BIT_4)) {
4655 /* Force 'loop preferred, else point-to-point'. */
4656 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
4657 nv->add_firmware_options[0] |= BIT_5;
4658 }
4659 strcpy(ha->model_number, "QLA22xx");
4660 } else /*if (IS_QLA2100(ha))*/ {
4661 strcpy(ha->model_number, "QLA2100");
4662 }
4663
4664 /*
4665 * Copy over NVRAM RISC parameter block to initialization control block.
4666 */
4667 dptr1 = (uint8_t *)icb;
4668 dptr2 = (uint8_t *)&nv->parameter_block_version;
4669 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
4670 while (cnt--)
4671 *dptr1++ = *dptr2++;
4672
4673 /* Copy 2nd half. */
4674 dptr1 = (uint8_t *)icb->add_firmware_options;
4675 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
4676 while (cnt--)
4677 *dptr1++ = *dptr2++;
0eaaca4c 4678 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5341e868
AV
4679 /* Use alternate WWN? */
4680 if (nv->host_p[1] & BIT_7) {
4681 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4682 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4683 }
4684
1da177e4
LT
4685 /* Prepare nodename */
4686 if ((icb->firmware_options[1] & BIT_6) == 0) {
4687 /*
4688 * Firmware will apply the following mask if the nodename was
4689 * not provided.
4690 */
4691 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4692 icb->node_name[0] &= 0xF0;
4693 }
4694
4695 /*
4696 * Set host adapter parameters.
4697 */
3ce8866c
SK
4698
4699 /*
4700 * BIT_7 in the host-parameters section allows for modification to
4701 * internal driver logging.
4702 */
0181944f 4703 if (nv->host_p[0] & BIT_7)
cfb0919c 4704 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
4705 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
4706 /* Always load RISC code on non ISP2[12]00 chips. */
4707 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
4708 ha->flags.disable_risc_code_load = 0;
4709 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
4710 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
4711 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 4712 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 4713 ha->flags.disable_serdes = 0;
1da177e4
LT
4714
4715 ha->operating_mode =
4716 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
4717
4718 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
4719 sizeof(ha->fw_seriallink_options));
4720
4721 /* save HBA serial number */
4722 ha->serial0 = icb->port_name[5];
4723 ha->serial1 = icb->port_name[6];
4724 ha->serial2 = icb->port_name[7];
e315cd28
AC
4725 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4726 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4 4727
ad950360 4728 icb->execution_throttle = cpu_to_le16(0xFFFF);
1da177e4
LT
4729
4730 ha->retry_count = nv->retry_count;
4731
4732 /* Set minimum login_timeout to 4 seconds. */
5b91490e 4733 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
4734 nv->login_timeout = ql2xlogintimeout;
4735 if (nv->login_timeout < 4)
4736 nv->login_timeout = 4;
4737 ha->login_timeout = nv->login_timeout;
1da177e4 4738
00a537b8
AV
4739 /* Set minimum RATOV to 100 tenths of a second. */
4740 ha->r_a_tov = 100;
1da177e4 4741
1da177e4
LT
4742 ha->loop_reset_delay = nv->reset_delay;
4743
1da177e4
LT
4744 /* Link Down Timeout = 0:
4745 *
4746 * When Port Down timer expires we will start returning
4747 * I/O's to OS with "DID_NO_CONNECT".
4748 *
4749 * Link Down Timeout != 0:
4750 *
4751 * The driver waits for the link to come up after link down
4752 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 4753 */
1da177e4
LT
4754 if (nv->link_down_timeout == 0) {
4755 ha->loop_down_abort_time =
354d6b21 4756 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
4757 } else {
4758 ha->link_down_timeout = nv->link_down_timeout;
4759 ha->loop_down_abort_time =
4760 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 4761 }
1da177e4 4762
1da177e4
LT
4763 /*
4764 * Need enough time to try and get the port back.
4765 */
4766 ha->port_down_retry_count = nv->port_down_retry_count;
4767 if (qlport_down_retry)
4768 ha->port_down_retry_count = qlport_down_retry;
4769 /* Set login_retry_count */
4770 ha->login_retry_count = nv->retry_count;
4771 if (ha->port_down_retry_count == nv->port_down_retry_count &&
4772 ha->port_down_retry_count > 3)
4773 ha->login_retry_count = ha->port_down_retry_count;
4774 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4775 ha->login_retry_count = ha->port_down_retry_count;
4776 if (ql2xloginretrycount)
4777 ha->login_retry_count = ql2xloginretrycount;
4778
ad950360 4779 icb->lun_enables = cpu_to_le16(0);
1da177e4
LT
4780 icb->command_resource_count = 0;
4781 icb->immediate_notify_resource_count = 0;
ad950360 4782 icb->timeout = cpu_to_le16(0);
1da177e4
LT
4783
4784 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4785 /* Enable RIO */
4786 icb->firmware_options[0] &= ~BIT_3;
4787 icb->add_firmware_options[0] &=
4788 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4789 icb->add_firmware_options[0] |= BIT_2;
4790 icb->response_accumulation_timer = 3;
4791 icb->interrupt_delay_timer = 5;
4792
e315cd28 4793 vha->flags.process_response_queue = 1;
1da177e4 4794 } else {
4fdfefe5 4795 /* Enable ZIO. */
e315cd28 4796 if (!vha->flags.init_done) {
4fdfefe5
AV
4797 ha->zio_mode = icb->add_firmware_options[0] &
4798 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4799 ha->zio_timer = icb->interrupt_delay_timer ?
58e2753c 4800 icb->interrupt_delay_timer : 2;
4fdfefe5 4801 }
1da177e4
LT
4802 icb->add_firmware_options[0] &=
4803 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 4804 vha->flags.process_response_queue = 0;
4fdfefe5 4805 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 4806 ha->zio_mode = QLA_ZIO_MODE_6;
4807
7c3df132 4808 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
4809 "ZIO mode %d enabled; timer delay (%d us).\n",
4810 ha->zio_mode, ha->zio_timer * 100);
1da177e4 4811
4fdfefe5
AV
4812 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
4813 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 4814 vha->flags.process_response_queue = 1;
1da177e4
LT
4815 }
4816 }
4817
4e08df3f 4818 if (rval) {
7c3df132
SK
4819 ql_log(ql_log_warn, vha, 0x0069,
4820 "NVRAM configuration failed.\n");
4e08df3f
DM
4821 }
4822 return (rval);
1da177e4
LT
4823}
4824
19a7b4ae
JSEC
4825static void
4826qla2x00_rport_del(void *data)
4827{
4828 fc_port_t *fcport = data;
d97994dc 4829 struct fc_rport *rport;
044d78e1 4830 unsigned long flags;
d97994dc 4831
044d78e1 4832 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
58e2753c 4833 rport = fcport->drport ? fcport->drport : fcport->rport;
d97994dc 4834 fcport->drport = NULL;
044d78e1 4835 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
726b8548 4836 if (rport) {
83548fe2
QT
4837 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4838 "%s %8phN. rport %p roles %x\n",
4839 __func__, fcport->port_name, rport,
4840 rport->roles);
726b8548 4841
d97994dc 4842 fc_remote_port_delete(rport);
726b8548 4843 }
19a7b4ae
JSEC
4844}
4845
a630bdc5
BVA
4846void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
4847{
4848 int old_state;
4849
4850 old_state = atomic_read(&fcport->state);
4851 atomic_set(&fcport->state, state);
4852
4853 /* Don't print state transitions during initial allocation of fcport */
4854 if (old_state && old_state != state) {
4855 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
4856 "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
4857 fcport->port_name, port_state_str[old_state],
4858 port_state_str[state], fcport->d_id.b.domain,
4859 fcport->d_id.b.area, fcport->d_id.b.al_pa);
4860 }
4861}
4862
1da177e4
LT
4863/**
4864 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2db6228d 4865 * @vha: HA context
1da177e4
LT
4866 * @flags: allocation flags
4867 *
4868 * Returns a pointer to the allocated fcport, or NULL, if none available.
4869 */
9a069e19 4870fc_port_t *
e315cd28 4871qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
4872{
4873 fc_port_t *fcport;
4874
bbfbbbc1
MK
4875 fcport = kzalloc(sizeof(fc_port_t), flags);
4876 if (!fcport)
4877 return NULL;
1da177e4 4878
9ecd6564
QT
4879 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4880 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4881 flags);
4882 if (!fcport->ct_desc.ct_sns) {
4883 ql_log(ql_log_warn, vha, 0xd049,
4884 "Failed to allocate ct_sns request.\n");
4885 kfree(fcport);
4886 return NULL;
4887 }
4888
1da177e4 4889 /* Setup fcport template structure. */
e315cd28 4890 fcport->vha = vha;
1da177e4
LT
4891 fcport->port_type = FCT_UNKNOWN;
4892 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 4893 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 4894 fcport->supported_classes = FC_COS_UNSPECIFIED;
f635e48e 4895 fcport->fp_speed = PORT_SPEED_UNKNOWN;
1da177e4 4896
726b8548
QT
4897 fcport->disc_state = DSC_DELETED;
4898 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4899 fcport->deleted = QLA_SESS_DELETED;
4900 fcport->login_retry = vha->hw->login_retry_count;
9ecd6564 4901 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
726b8548
QT
4902 fcport->logout_on_delete = 1;
4903
4904 if (!fcport->ct_desc.ct_sns) {
83548fe2 4905 ql_log(ql_log_warn, vha, 0xd049,
726b8548
QT
4906 "Failed to allocate ct_sns request.\n");
4907 kfree(fcport);
4908 fcport = NULL;
4909 }
9ecd6564 4910
726b8548 4911 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
cd4ed6b4 4912 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
726b8548
QT
4913 INIT_LIST_HEAD(&fcport->gnl_entry);
4914 INIT_LIST_HEAD(&fcport->list);
4915
bbfbbbc1 4916 return fcport;
1da177e4
LT
4917}
4918
726b8548
QT
4919void
4920qla2x00_free_fcport(fc_port_t *fcport)
4921{
4922 if (fcport->ct_desc.ct_sns) {
4923 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4924 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4925 fcport->ct_desc.ct_sns_dma);
4926
4927 fcport->ct_desc.ct_sns = NULL;
4928 }
ffbc6476
QT
4929 list_del(&fcport->list);
4930 qla2x00_clear_loop_id(fcport);
726b8548
QT
4931 kfree(fcport);
4932}
4933
1da177e4
LT
4934/*
4935 * qla2x00_configure_loop
4936 * Updates Fibre Channel Device Database with what is actually on loop.
4937 *
4938 * Input:
4939 * ha = adapter block pointer.
4940 *
4941 * Returns:
4942 * 0 = success.
4943 * 1 = error.
4944 * 2 = database was full and device was not configured.
4945 */
4946static int
e315cd28 4947qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
4948{
4949 int rval;
4950 unsigned long flags, save_flags;
e315cd28 4951 struct qla_hw_data *ha = vha->hw;
bd432bb5 4952
1da177e4
LT
4953 rval = QLA_SUCCESS;
4954
4955 /* Get Initiator ID */
e315cd28
AC
4956 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
4957 rval = qla2x00_configure_hba(vha);
1da177e4 4958 if (rval != QLA_SUCCESS) {
7c3df132
SK
4959 ql_dbg(ql_dbg_disc, vha, 0x2013,
4960 "Unable to configure HBA.\n");
1da177e4
LT
4961 return (rval);
4962 }
4963 }
4964
e315cd28 4965 save_flags = flags = vha->dpc_flags;
7c3df132
SK
4966 ql_dbg(ql_dbg_disc, vha, 0x2014,
4967 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
4968
4969 /*
4970 * If we have both an RSCN and PORT UPDATE pending then handle them
4971 * both at the same time.
4972 */
e315cd28
AC
4973 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4974 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 4975
3064ff39
MH
4976 qla2x00_get_data_rate(vha);
4977
1da177e4
LT
4978 /* Determine what we need to do */
4979 if (ha->current_topology == ISP_CFG_FL &&
4980 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4981
1da177e4
LT
4982 set_bit(RSCN_UPDATE, &flags);
4983
4984 } else if (ha->current_topology == ISP_CFG_F &&
4985 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4986
1da177e4
LT
4987 set_bit(RSCN_UPDATE, &flags);
4988 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
4989
4990 } else if (ha->current_topology == ISP_CFG_N) {
4991 clear_bit(RSCN_UPDATE, &flags);
48acad09
QT
4992 if (qla_tgt_mode_enabled(vha)) {
4993 /* allow the other side to start the login */
9cd883f0
QT
4994 clear_bit(LOCAL_LOOP_UPDATE, &flags);
4995 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
9cd883f0 4996 }
41dc529a
QT
4997 } else if (ha->current_topology == ISP_CFG_NL) {
4998 clear_bit(RSCN_UPDATE, &flags);
4999 set_bit(LOCAL_LOOP_UPDATE, &flags);
e315cd28 5000 } else if (!vha->flags.online ||
1da177e4 5001 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1da177e4
LT
5002 set_bit(RSCN_UPDATE, &flags);
5003 set_bit(LOCAL_LOOP_UPDATE, &flags);
5004 }
5005
5006 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
5007 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5008 ql_dbg(ql_dbg_disc, vha, 0x2015,
5009 "Loop resync needed, failing.\n");
1da177e4 5010 rval = QLA_FUNCTION_FAILED;
642ef983 5011 } else
e315cd28 5012 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
5013 }
5014
5015 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132 5016 if (LOOP_TRANSITION(vha)) {
83548fe2 5017 ql_dbg(ql_dbg_disc, vha, 0x2099,
7c3df132 5018 "Needs RSCN update and loop transition.\n");
1da177e4 5019 rval = QLA_FUNCTION_FAILED;
7c3df132 5020 }
e315cd28
AC
5021 else
5022 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
5023 }
5024
5025 if (rval == QLA_SUCCESS) {
e315cd28
AC
5026 if (atomic_read(&vha->loop_down_timer) ||
5027 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
5028 rval = QLA_FUNCTION_FAILED;
5029 } else {
e315cd28 5030 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
5031 ql_dbg(ql_dbg_disc, vha, 0x2069,
5032 "LOOP READY.\n");
ec7193e2 5033 ha->flags.fw_init_done = 1;
3bb67df5
DKU
5034
5035 /*
5036 * Process any ATIO queue entries that came in
5037 * while we weren't online.
5038 */
ead03855
QT
5039 if (qla_tgt_mode_enabled(vha) ||
5040 qla_dual_mode_enabled(vha)) {
1073daa4
QT
5041 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
5042 qlt_24xx_process_atio_queue(vha, 0);
5043 spin_unlock_irqrestore(&ha->tgt.atio_lock,
5044 flags);
3bb67df5 5045 }
1da177e4
LT
5046 }
5047 }
5048
5049 if (rval) {
7c3df132
SK
5050 ql_dbg(ql_dbg_disc, vha, 0x206a,
5051 "%s *** FAILED ***.\n", __func__);
1da177e4 5052 } else {
7c3df132
SK
5053 ql_dbg(ql_dbg_disc, vha, 0x206b,
5054 "%s: exiting normally.\n", __func__);
1da177e4
LT
5055 }
5056
cc3ef7bc 5057 /* Restore state if a resync event occurred during processing */
e315cd28 5058 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 5059 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 5060 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 5061 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 5062 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 5063 }
1da177e4
LT
5064 }
5065
5066 return (rval);
5067}
5068
1da177e4
LT
5069/*
5070 * qla2x00_configure_local_loop
5071 * Updates Fibre Channel Device Database with local loop devices.
5072 *
5073 * Input:
5074 * ha = adapter block pointer.
5075 *
5076 * Returns:
5077 * 0 = success.
5078 */
5079static int
e315cd28 5080qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
5081{
5082 int rval, rval2;
5083 int found_devs;
5084 int found;
5085 fc_port_t *fcport, *new_fcport;
5086
5087 uint16_t index;
5088 uint16_t entries;
5089 char *id_iter;
5090 uint16_t loop_id;
5091 uint8_t domain, area, al_pa;
e315cd28 5092 struct qla_hw_data *ha = vha->hw;
41dc529a 5093 unsigned long flags;
1da177e4 5094
8777e431
QT
5095 /* Inititae N2N login. */
5096 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
5097 /* borrowing */
5098 u32 *bp, i, sz;
5099
5100 memset(ha->init_cb, 0, ha->init_cb_size);
5101 sz = min_t(int, sizeof(struct els_plogi_payload),
5102 ha->init_cb_size);
5103 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
5104 (void *)ha->init_cb, sz);
5105 if (rval == QLA_SUCCESS) {
5106 bp = (uint32_t *)ha->init_cb;
5107 for (i = 0; i < sz/4 ; i++, bp++)
5108 *bp = cpu_to_be32(*bp);
5109
5110 memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
5111 sizeof(ha->plogi_els_payld.data));
5112 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5113 } else {
5114 ql_dbg(ql_dbg_init, vha, 0x00d1,
5115 "PLOGI ELS param read fail.\n");
5116 }
5117 return QLA_SUCCESS;
5118 }
5119
1da177e4
LT
5120 found_devs = 0;
5121 new_fcport = NULL;
642ef983 5122 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 5123
1da177e4 5124 /* Get list of logged in devices. */
642ef983 5125 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 5126 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
5127 &entries);
5128 if (rval != QLA_SUCCESS)
5129 goto cleanup_allocation;
5130
83548fe2 5131 ql_dbg(ql_dbg_disc, vha, 0x2011,
7c3df132
SK
5132 "Entries in ID list (%d).\n", entries);
5133 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
f8f97b0c 5134 ha->gid_list, entries * sizeof(*ha->gid_list));
1da177e4 5135
0e324e94
QT
5136 if (entries == 0) {
5137 spin_lock_irqsave(&vha->work_lock, flags);
5138 vha->scan.scan_retry++;
5139 spin_unlock_irqrestore(&vha->work_lock, flags);
5140
5141 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5142 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5143 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5144 }
5145 } else {
5146 vha->scan.scan_retry = 0;
5147 }
5148
9cd883f0
QT
5149 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5150 fcport->scan_state = QLA_FCPORT_SCAN;
5151 }
5152
1da177e4 5153 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 5154 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5155 if (new_fcport == NULL) {
83548fe2 5156 ql_log(ql_log_warn, vha, 0x2012,
7c3df132 5157 "Memory allocation failed for fcport.\n");
1da177e4
LT
5158 rval = QLA_MEMORY_ALLOC_FAILED;
5159 goto cleanup_allocation;
5160 }
5161 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5162
1da177e4
LT
5163 /* Add devices to port list. */
5164 id_iter = (char *)ha->gid_list;
5165 for (index = 0; index < entries; index++) {
5166 domain = ((struct gid_list_info *)id_iter)->domain;
5167 area = ((struct gid_list_info *)id_iter)->area;
5168 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 5169 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
5170 loop_id = (uint16_t)
5171 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 5172 else
1da177e4
LT
5173 loop_id = le16_to_cpu(
5174 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 5175 id_iter += ha->gid_list_info_size;
1da177e4
LT
5176
5177 /* Bypass reserved domain fields. */
5178 if ((domain & 0xf0) == 0xf0)
5179 continue;
5180
4705f10e
QT
5181 /* Bypass if not same domain and area of adapter. */
5182 if (area && domain && ((area != vha->d_id.b.area) ||
5183 (domain != vha->d_id.b.domain)) &&
5184 (ha->current_topology == ISP_CFG_NL))
5185 continue;
5186
5187
1da177e4
LT
5188 /* Bypass invalid local loop ID. */
5189 if (loop_id > LAST_LOCAL_LOOP_ID)
5190 continue;
5191
41dc529a 5192 memset(new_fcport->port_name, 0, WWN_SIZE);
370d550e 5193
1da177e4
LT
5194 /* Fill in member data. */
5195 new_fcport->d_id.b.domain = domain;
5196 new_fcport->d_id.b.area = area;
5197 new_fcport->d_id.b.al_pa = al_pa;
5198 new_fcport->loop_id = loop_id;
9cd883f0 5199 new_fcport->scan_state = QLA_FCPORT_FOUND;
41dc529a 5200
e315cd28 5201 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 5202 if (rval2 != QLA_SUCCESS) {
83548fe2 5203 ql_dbg(ql_dbg_disc, vha, 0x2097,
7c3df132
SK
5204 "Failed to retrieve fcport information "
5205 "-- get_port_database=%x, loop_id=0x%04x.\n",
5206 rval2, new_fcport->loop_id);
edd05de1
DG
5207 /* Skip retry if N2N */
5208 if (ha->current_topology != ISP_CFG_N) {
5209 ql_dbg(ql_dbg_disc, vha, 0x2105,
5210 "Scheduling resync.\n");
5211 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5212 continue;
5213 }
1da177e4
LT
5214 }
5215
41dc529a 5216 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
5217 /* Check for matching device in port list. */
5218 found = 0;
5219 fcport = NULL;
e315cd28 5220 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
5221 if (memcmp(new_fcport->port_name, fcport->port_name,
5222 WWN_SIZE))
5223 continue;
5224
ddb9b126 5225 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
5226 fcport->loop_id = new_fcport->loop_id;
5227 fcport->port_type = new_fcport->port_type;
5228 fcport->d_id.b24 = new_fcport->d_id.b24;
5229 memcpy(fcport->node_name, new_fcport->node_name,
5230 WWN_SIZE);
9cd883f0 5231 fcport->scan_state = QLA_FCPORT_FOUND;
1da177e4
LT
5232 found++;
5233 break;
5234 }
5235
5236 if (!found) {
5237 /* New device, add to fcports list. */
e315cd28 5238 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
5239
5240 /* Allocate a new replacement fcport. */
5241 fcport = new_fcport;
41dc529a
QT
5242
5243 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5244
e315cd28 5245 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
41dc529a 5246
1da177e4 5247 if (new_fcport == NULL) {
83548fe2 5248 ql_log(ql_log_warn, vha, 0xd031,
7c3df132 5249 "Failed to allocate memory for fcport.\n");
1da177e4
LT
5250 rval = QLA_MEMORY_ALLOC_FAILED;
5251 goto cleanup_allocation;
5252 }
41dc529a 5253 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
5254 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5255 }
5256
41dc529a
QT
5257 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5258
d8b45213 5259 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 5260 fcport->fp_speed = ha->link_data_rate;
d8b45213 5261
1da177e4
LT
5262 found_devs++;
5263 }
5264
9cd883f0
QT
5265 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5266 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5267 break;
5268
5269 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5270 if ((qla_dual_mode_enabled(vha) ||
5271 qla_ini_mode_enabled(vha)) &&
5272 atomic_read(&fcport->state) == FCS_ONLINE) {
5273 qla2x00_mark_device_lost(vha, fcport,
5274 ql2xplogiabsentdevice, 0);
5275 if (fcport->loop_id != FC_NO_LOOP_ID &&
5276 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5277 fcport->port_type != FCT_INITIATOR &&
5278 fcport->port_type != FCT_BROADCAST) {
5279 ql_dbg(ql_dbg_disc, vha, 0x20f0,
5280 "%s %d %8phC post del sess\n",
5281 __func__, __LINE__,
5282 fcport->port_name);
5283
d8630bb9 5284 qlt_schedule_sess_for_deletion(fcport);
9cd883f0
QT
5285 continue;
5286 }
5287 }
5288 }
5289
5290 if (fcport->scan_state == QLA_FCPORT_FOUND)
5291 qla24xx_fcport_handle_login(vha, fcport);
5292 }
5293
1da177e4 5294cleanup_allocation:
c9475cb0 5295 kfree(new_fcport);
1da177e4
LT
5296
5297 if (rval != QLA_SUCCESS) {
83548fe2 5298 ql_dbg(ql_dbg_disc, vha, 0x2098,
7c3df132 5299 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
5300 }
5301
1da177e4
LT
5302 return (rval);
5303}
5304
d8b45213 5305static void
e315cd28 5306qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 5307{
d8b45213 5308 int rval;
93f2bd67 5309 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 5310 struct qla_hw_data *ha = vha->hw;
d8b45213 5311
c76f2c01 5312 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
5313 return;
5314
c9afb9a2
GM
5315 if (atomic_read(&fcport->state) != FCS_ONLINE)
5316 return;
5317
39bd9622 5318 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
413c2f33
HM
5319 fcport->fp_speed > ha->link_data_rate ||
5320 !ha->flags.gpsc_supported)
d8b45213
AV
5321 return;
5322
e315cd28 5323 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 5324 mb);
d8b45213 5325 if (rval != QLA_SUCCESS) {
7c3df132 5326 ql_dbg(ql_dbg_disc, vha, 0x2004,
7b833558
OK
5327 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5328 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
d8b45213 5329 } else {
7c3df132 5330 ql_dbg(ql_dbg_disc, vha, 0x2005,
33b28357 5331 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
d0297c9a 5332 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
33b28357 5333 fcport->fp_speed, fcport->port_name);
d8b45213
AV
5334 }
5335}
5336
cc28e0ac
QT
5337void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5338{
5339 qla2x00_iidma_fcport(vha, fcport);
5340 qla24xx_update_fcport_fcp_prio(vha, fcport);
5341}
5342
5343int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5344{
5345 struct qla_work_evt *e;
5346
5347 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
5348 if (!e)
5349 return QLA_FUNCTION_FAILED;
5350
5351 e->u.fcport.fcport = fcport;
5352 return qla2x00_post_work(vha, e);
5353}
5354
726b8548 5355/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
23be331d 5356static void
e315cd28 5357qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118 5358{
5359 struct fc_rport_identifiers rport_ids;
bdf79621 5360 struct fc_rport *rport;
044d78e1 5361 unsigned long flags;
8482e118 5362
b63d8b89
QT
5363 if (atomic_read(&fcport->state) == FCS_ONLINE)
5364 return;
5365
f8b02a85
AV
5366 rport_ids.node_name = wwn_to_u64(fcport->node_name);
5367 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118 5368 rport_ids.port_id = fcport->d_id.b.domain << 16 |
5369 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 5370 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 5371 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 5372 if (!rport) {
7c3df132
SK
5373 ql_log(ql_log_warn, vha, 0x2006,
5374 "Unable to allocate fc remote port.\n");
77d74143
AV
5375 return;
5376 }
2d70c103 5377
044d78e1 5378 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 5379 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 5380 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 5381
ad3e0eda 5382 rport->supported_classes = fcport->supported_classes;
77d74143 5383
a6a6d058 5384 rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
8482e118 5385 if (fcport->port_type == FCT_INITIATOR)
a6a6d058 5386 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
8482e118 5387 if (fcport->port_type == FCT_TARGET)
a6a6d058
HR
5388 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
5389 if (fcport->port_type & FCT_NVME_INITIATOR)
5390 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
5391 if (fcport->port_type & FCT_NVME_TARGET)
5392 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
5393 if (fcport->port_type & FCT_NVME_DISCOVERY)
5394 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
726b8548 5395
83548fe2
QT
5396 ql_dbg(ql_dbg_disc, vha, 0x20ee,
5397 "%s %8phN. rport %p is %s mode\n",
5398 __func__, fcport->port_name, rport,
a6a6d058
HR
5399 (fcport->port_type == FCT_TARGET) ? "tgt" :
5400 ((fcport->port_type & FCT_NVME) ? "nvme" :"ini"));
726b8548 5401
77d74143 5402 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
5403}
5404
23be331d
AB
5405/*
5406 * qla2x00_update_fcport
5407 * Updates device on list.
5408 *
5409 * Input:
5410 * ha = adapter block pointer.
5411 * fcport = port structure pointer.
5412 *
5413 * Return:
5414 * 0 - Success
5415 * BIT_0 - error
5416 *
5417 * Context:
5418 * Kernel context.
5419 */
5420void
e315cd28 5421qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 5422{
726b8548
QT
5423 if (IS_SW_RESV_ADDR(fcport->d_id))
5424 return;
5425
cd4ed6b4
QT
5426 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
5427 __func__, fcport->port_name);
5428
5429 fcport->disc_state = DSC_UPD_FCPORT;
5430 fcport->login_retry = vha->hw->login_retry_count;
b63d8b89 5431 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
b63d8b89
QT
5432 fcport->deleted = 0;
5433 fcport->logout_on_delete = 1;
8777e431 5434 fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
23be331d 5435
8777e431
QT
5436 switch (vha->hw->current_topology) {
5437 case ISP_CFG_N:
5438 case ISP_CFG_NL:
5439 fcport->keep_nport_handle = 1;
5440 break;
5441 default:
5442 break;
5443 }
5444
aecf0434
QT
5445 qla2x00_iidma_fcport(vha, fcport);
5446
e84067d7
DG
5447 if (fcport->fc4f_nvme) {
5448 qla_nvme_register_remote(vha, fcport);
b63d8b89
QT
5449 fcport->disc_state = DSC_LOGIN_COMPLETE;
5450 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e84067d7
DG
5451 return;
5452 }
5453
21090cbe 5454 qla24xx_update_fcport_fcp_prio(vha, fcport);
d20ed91b 5455
726b8548
QT
5456 switch (vha->host->active_mode) {
5457 case MODE_INITIATOR:
d20ed91b 5458 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
5459 break;
5460 case MODE_TARGET:
5461 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5462 !vha->vha_tgt.qla_tgt->tgt_stopped)
5463 qlt_fc_port_added(vha, fcport);
5464 break;
5465 case MODE_DUAL:
d20ed91b 5466 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
5467 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5468 !vha->vha_tgt.qla_tgt->tgt_stopped)
5469 qlt_fc_port_added(vha, fcport);
5470 break;
5471 default:
5472 break;
d20ed91b 5473 }
cc28e0ac 5474
aecf0434
QT
5475 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
5476
cc28e0ac
QT
5477 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
5478 if (fcport->id_changed) {
5479 fcport->id_changed = 0;
5480 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5481 "%s %d %8phC post gfpnid fcp_cnt %d\n",
5482 __func__, __LINE__, fcport->port_name,
5483 vha->fcport_count);
5484 qla24xx_post_gfpnid_work(vha, fcport);
5485 } else {
5486 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5487 "%s %d %8phC post gpsc fcp_cnt %d\n",
5488 __func__, __LINE__, fcport->port_name,
5489 vha->fcport_count);
5490 qla24xx_post_gpsc_work(vha, fcport);
5491 }
5492 }
cd4ed6b4
QT
5493
5494 fcport->disc_state = DSC_LOGIN_COMPLETE;
5495}
5496
5497void qla_register_fcport_fn(struct work_struct *work)
5498{
5499 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5500 u32 rscn_gen = fcport->rscn_gen;
5501 u16 data[2];
5502
5503 if (IS_SW_RESV_ADDR(fcport->d_id))
5504 return;
5505
5506 qla2x00_update_fcport(fcport->vha, fcport);
5507
5508 if (rscn_gen != fcport->rscn_gen) {
5509 /* RSCN(s) came in while registration */
5510 switch (fcport->next_disc_state) {
5511 case DSC_DELETE_PEND:
5512 qlt_schedule_sess_for_deletion(fcport);
5513 break;
5514 case DSC_ADISC:
5515 data[0] = data[1] = 0;
5516 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5517 data);
5518 break;
5519 default:
5520 break;
5521 }
5522 }
23be331d
AB
5523}
5524
1da177e4
LT
5525/*
5526 * qla2x00_configure_fabric
5527 * Setup SNS devices with loop ID's.
5528 *
5529 * Input:
5530 * ha = adapter block pointer.
5531 *
5532 * Returns:
5533 * 0 = success.
5534 * BIT_0 = error
5535 */
5536static int
e315cd28 5537qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 5538{
b3b02e6e 5539 int rval;
726b8548 5540 fc_port_t *fcport;
1da177e4 5541 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 5542 uint16_t loop_id;
1da177e4 5543 LIST_HEAD(new_fcports);
e315cd28 5544 struct qla_hw_data *ha = vha->hw;
df673274 5545 int discovery_gen;
1da177e4
LT
5546
5547 /* If FL port exists, then SNS is present */
e428924c 5548 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
5549 loop_id = NPH_F_PORT;
5550 else
5551 loop_id = SNS_FL_PORT;
e315cd28 5552 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 5553 if (rval != QLA_SUCCESS) {
83548fe2 5554 ql_dbg(ql_dbg_disc, vha, 0x20a0,
7c3df132 5555 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 5556
e315cd28 5557 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
5558 return (QLA_SUCCESS);
5559 }
e315cd28 5560 vha->device_flags |= SWITCH_FOUND;
1da177e4 5561
41dc529a
QT
5562
5563 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
5564 rval = qla2x00_send_change_request(vha, 0x3, 0);
5565 if (rval != QLA_SUCCESS)
5566 ql_log(ql_log_warn, vha, 0x121,
5567 "Failed to enable receiving of RSCN requests: 0x%x.\n",
5568 rval);
5569 }
5570
5571
1da177e4 5572 do {
726b8548
QT
5573 qla2x00_mgmt_svr_login(vha);
5574
cca5335c
AV
5575 /* FDMI support. */
5576 if (ql2xfdmienable &&
e315cd28
AC
5577 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
5578 qla2x00_fdmi_register(vha);
cca5335c 5579
1da177e4 5580 /* Ensure we are logged into the SNS. */
a14c7711 5581 loop_id = NPH_SNS_LID(ha);
0b91d116
CD
5582 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
5583 0xfc, mb, BIT_1|BIT_0);
a14c7711
JC
5584 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5585 ql_dbg(ql_dbg_disc, vha, 0x20a1,
5586 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
5587 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
0b91d116 5588 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 5589 return rval;
0b91d116 5590 }
e315cd28
AC
5591 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
5592 if (qla2x00_rft_id(vha)) {
1da177e4 5593 /* EMPTY */
83548fe2 5594 ql_dbg(ql_dbg_disc, vha, 0x20a2,
7c3df132 5595 "Register FC-4 TYPE failed.\n");
b98ae0d7
QT
5596 if (test_bit(LOOP_RESYNC_NEEDED,
5597 &vha->dpc_flags))
5598 break;
1da177e4 5599 }
d3bae931 5600 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
1da177e4 5601 /* EMPTY */
83548fe2 5602 ql_dbg(ql_dbg_disc, vha, 0x209a,
7c3df132 5603 "Register FC-4 Features failed.\n");
b98ae0d7
QT
5604 if (test_bit(LOOP_RESYNC_NEEDED,
5605 &vha->dpc_flags))
5606 break;
1da177e4 5607 }
d3bae931
DG
5608 if (vha->flags.nvme_enabled) {
5609 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
5610 ql_dbg(ql_dbg_disc, vha, 0x2049,
5611 "Register NVME FC Type Features failed.\n");
5612 }
5613 }
e315cd28 5614 if (qla2x00_rnn_id(vha)) {
1da177e4 5615 /* EMPTY */
83548fe2 5616 ql_dbg(ql_dbg_disc, vha, 0x2104,
7c3df132 5617 "Register Node Name failed.\n");
b98ae0d7
QT
5618 if (test_bit(LOOP_RESYNC_NEEDED,
5619 &vha->dpc_flags))
5620 break;
e315cd28 5621 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 5622 /* EMPTY */
83548fe2 5623 ql_dbg(ql_dbg_disc, vha, 0x209b,
0bf0efa1 5624 "Register Symbolic Node Name failed.\n");
b98ae0d7
QT
5625 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5626 break;
1da177e4
LT
5627 }
5628 }
5629
827210ba 5630
df673274
AP
5631 /* Mark the time right before querying FW for connected ports.
5632 * This process is long, asynchronous and by the time it's done,
5633 * collected information might not be accurate anymore. E.g.
5634 * disconnected port might have re-connected and a brand new
5635 * session has been created. In this case session's generation
5636 * will be newer than discovery_gen. */
5637 qlt_do_generation_tick(vha, &discovery_gen);
5638
a4239945 5639 if (USE_ASYNC_SCAN(ha)) {
33b28357
QT
5640 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
5641 NULL);
a4239945
QT
5642 if (rval)
5643 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5644 } else {
f352eeb7
QT
5645 list_for_each_entry(fcport, &vha->vp_fcports, list)
5646 fcport->scan_state = QLA_FCPORT_SCAN;
5647
a4239945
QT
5648 rval = qla2x00_find_all_fabric_devs(vha);
5649 }
1da177e4
LT
5650 if (rval != QLA_SUCCESS)
5651 break;
1da177e4
LT
5652 } while (0);
5653
e84067d7
DG
5654 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
5655 qla_nvme_register_hba(vha);
5656
726b8548 5657 if (rval)
7c3df132
SK
5658 ql_dbg(ql_dbg_disc, vha, 0x2068,
5659 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
5660
5661 return (rval);
5662}
5663
1da177e4
LT
5664/*
5665 * qla2x00_find_all_fabric_devs
5666 *
5667 * Input:
5668 * ha = adapter block pointer.
5669 * dev = database device entry pointer.
5670 *
5671 * Returns:
5672 * 0 = success.
5673 *
5674 * Context:
5675 * Kernel context.
5676 */
5677static int
726b8548 5678qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1da177e4
LT
5679{
5680 int rval;
5681 uint16_t loop_id;
726b8548 5682 fc_port_t *fcport, *new_fcport;
1da177e4
LT
5683 int found;
5684
5685 sw_info_t *swl;
5686 int swl_idx;
5687 int first_dev, last_dev;
1516ef44 5688 port_id_t wrap = {}, nxt_d_id;
e315cd28 5689 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 5690 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
726b8548 5691 unsigned long flags;
1da177e4
LT
5692
5693 rval = QLA_SUCCESS;
5694
5695 /* Try GID_PT to get device list, else GAN. */
7a67735b 5696 if (!ha->swl)
642ef983 5697 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
5698 GFP_KERNEL);
5699 swl = ha->swl;
bbfbbbc1 5700 if (!swl) {
1da177e4 5701 /*EMPTY*/
83548fe2 5702 ql_dbg(ql_dbg_disc, vha, 0x209c,
7c3df132 5703 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 5704 } else {
642ef983 5705 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 5706 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 5707 swl = NULL;
b98ae0d7
QT
5708 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5709 return rval;
e315cd28 5710 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 5711 swl = NULL;
b98ae0d7
QT
5712 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5713 return rval;
e315cd28 5714 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 5715 swl = NULL;
b98ae0d7
QT
5716 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5717 return rval;
726b8548
QT
5718 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
5719 swl = NULL;
b98ae0d7
QT
5720 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5721 return rval;
1da177e4 5722 }
e8c72ba5
CD
5723
5724 /* If other queries succeeded probe for FC-4 type */
b98ae0d7 5725 if (swl) {
e8c72ba5 5726 qla2x00_gff_id(vha, swl);
b98ae0d7
QT
5727 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5728 return rval;
5729 }
1da177e4
LT
5730 }
5731 swl_idx = 0;
5732
5733 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 5734 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5735 if (new_fcport == NULL) {
83548fe2 5736 ql_log(ql_log_warn, vha, 0x209d,
7c3df132 5737 "Failed to allocate memory for fcport.\n");
1da177e4
LT
5738 return (QLA_MEMORY_ALLOC_FAILED);
5739 }
5740 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
5741 /* Set start port ID scan at adapter ID. */
5742 first_dev = 1;
5743 last_dev = 0;
5744
5745 /* Starting free loop ID. */
e315cd28
AC
5746 loop_id = ha->min_external_loopid;
5747 for (; loop_id <= ha->max_loop_id; loop_id++) {
5748 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
5749 continue;
5750
3a6478df
GM
5751 if (ha->current_topology == ISP_CFG_FL &&
5752 (atomic_read(&vha->loop_down_timer) ||
5753 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
5754 atomic_set(&vha->loop_down_timer, 0);
5755 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5756 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 5757 break;
bb2d52b2 5758 }
1da177e4
LT
5759
5760 if (swl != NULL) {
5761 if (last_dev) {
5762 wrap.b24 = new_fcport->d_id.b24;
5763 } else {
5764 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
5765 memcpy(new_fcport->node_name,
5766 swl[swl_idx].node_name, WWN_SIZE);
5767 memcpy(new_fcport->port_name,
5768 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
5769 memcpy(new_fcport->fabric_port_name,
5770 swl[swl_idx].fabric_port_name, WWN_SIZE);
5771 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 5772 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4 5773
a5d42f4c 5774 new_fcport->nvme_flag = 0;
1a28faa0 5775 new_fcport->fc4f_nvme = 0;
a5d42f4c
DG
5776 if (vha->flags.nvme_enabled &&
5777 swl[swl_idx].fc4f_nvme) {
5778 new_fcport->fc4f_nvme =
5779 swl[swl_idx].fc4f_nvme;
5780 ql_log(ql_log_info, vha, 0x2131,
5781 "FOUND: NVME port %8phC as FC Type 28h\n",
5782 new_fcport->port_name);
5783 }
5784
1da177e4
LT
5785 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
5786 last_dev = 1;
5787 }
5788 swl_idx++;
5789 }
5790 } else {
5791 /* Send GA_NXT to the switch */
e315cd28 5792 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 5793 if (rval != QLA_SUCCESS) {
83548fe2 5794 ql_log(ql_log_warn, vha, 0x209e,
7c3df132
SK
5795 "SNS scan failed -- assuming "
5796 "zero-entry result.\n");
1da177e4
LT
5797 rval = QLA_SUCCESS;
5798 break;
5799 }
5800 }
5801
5802 /* If wrap on switch device list, exit. */
5803 if (first_dev) {
5804 wrap.b24 = new_fcport->d_id.b24;
5805 first_dev = 0;
5806 } else if (new_fcport->d_id.b24 == wrap.b24) {
83548fe2 5807 ql_dbg(ql_dbg_disc, vha, 0x209f,
7c3df132
SK
5808 "Device wrap (%02x%02x%02x).\n",
5809 new_fcport->d_id.b.domain,
5810 new_fcport->d_id.b.area,
5811 new_fcport->d_id.b.al_pa);
1da177e4
LT
5812 break;
5813 }
5814
2c3dfe3f 5815 /* Bypass if same physical adapter. */
e315cd28 5816 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
5817 continue;
5818
2c3dfe3f 5819 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
5820 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
5821 continue;
2c3dfe3f 5822
f7d289f6
AV
5823 /* Bypass if same domain and area of adapter. */
5824 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 5825 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
5826 ISP_CFG_FL)
5827 continue;
5828
1da177e4
LT
5829 /* Bypass reserved domain fields. */
5830 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
5831 continue;
5832
e8c72ba5 5833 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
4da26e16
CD
5834 if (ql2xgffidenable &&
5835 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
5836 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
5837 continue;
5838
726b8548
QT
5839 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5840
1da177e4
LT
5841 /* Locate matching device in database. */
5842 found = 0;
e315cd28 5843 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
5844 if (memcmp(new_fcport->port_name, fcport->port_name,
5845 WWN_SIZE))
5846 continue;
5847
827210ba 5848 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 5849
1da177e4
LT
5850 found++;
5851
d8b45213
AV
5852 /* Update port state. */
5853 memcpy(fcport->fabric_port_name,
5854 new_fcport->fabric_port_name, WWN_SIZE);
5855 fcport->fp_speed = new_fcport->fp_speed;
5856
1da177e4 5857 /*
b2032fd5
RD
5858 * If address the same and state FCS_ONLINE
5859 * (or in target mode), nothing changed.
1da177e4
LT
5860 */
5861 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
b2032fd5 5862 (atomic_read(&fcport->state) == FCS_ONLINE ||
726b8548 5863 (vha->host->active_mode == MODE_TARGET))) {
1da177e4
LT
5864 break;
5865 }
5866
5867 /*
5868 * If device was not a fabric device before.
5869 */
5870 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
5871 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 5872 qla2x00_clear_loop_id(fcport);
1da177e4
LT
5873 fcport->flags |= (FCF_FABRIC_DEVICE |
5874 FCF_LOGIN_NEEDED);
1da177e4
LT
5875 break;
5876 }
5877
5878 /*
5879 * Port ID changed or device was marked to be updated;
5880 * Log it out if still logged in and mark it for
5881 * relogin later.
5882 */
726b8548 5883 if (qla_tgt_mode_enabled(base_vha)) {
b2032fd5
RD
5884 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
5885 "port changed FC ID, %8phC"
5886 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
5887 fcport->port_name,
5888 fcport->d_id.b.domain,
5889 fcport->d_id.b.area,
5890 fcport->d_id.b.al_pa,
5891 fcport->loop_id,
5892 new_fcport->d_id.b.domain,
5893 new_fcport->d_id.b.area,
5894 new_fcport->d_id.b.al_pa);
5895 fcport->d_id.b24 = new_fcport->d_id.b24;
5896 break;
5897 }
5898
1da177e4
LT
5899 fcport->d_id.b24 = new_fcport->d_id.b24;
5900 fcport->flags |= FCF_LOGIN_NEEDED;
1da177e4
LT
5901 break;
5902 }
5903
9dd9686b
DT
5904 if (fcport->fc4f_nvme) {
5905 if (fcport->disc_state == DSC_DELETE_PEND) {
5906 fcport->disc_state = DSC_GNL;
5907 vha->fcport_count--;
5908 fcport->login_succ = 0;
5909 }
5910 }
5911
726b8548
QT
5912 if (found) {
5913 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1da177e4 5914 continue;
726b8548 5915 }
1da177e4 5916 /* If device was not in our fcports list, then add it. */
b2032fd5 5917 new_fcport->scan_state = QLA_FCPORT_FOUND;
726b8548
QT
5918 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5919
5920 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5921
1da177e4
LT
5922
5923 /* Allocate a new replacement fcport. */
5924 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 5925 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5926 if (new_fcport == NULL) {
83548fe2 5927 ql_log(ql_log_warn, vha, 0xd032,
7c3df132 5928 "Memory allocation failed for fcport.\n");
1da177e4
LT
5929 return (QLA_MEMORY_ALLOC_FAILED);
5930 }
5931 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5932 new_fcport->d_id.b24 = nxt_d_id.b24;
5933 }
5934
726b8548
QT
5935 qla2x00_free_fcport(new_fcport);
5936
5937 /*
5938 * Logout all previous fabric dev marked lost, except FCP2 devices.
5939 */
5940 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5941 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5942 break;
5943
5944 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
5945 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
5946 continue;
5947
5948 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5949 if ((qla_dual_mode_enabled(vha) ||
5950 qla_ini_mode_enabled(vha)) &&
5951 atomic_read(&fcport->state) == FCS_ONLINE) {
5952 qla2x00_mark_device_lost(vha, fcport,
5953 ql2xplogiabsentdevice, 0);
5954 if (fcport->loop_id != FC_NO_LOOP_ID &&
5955 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5956 fcport->port_type != FCT_INITIATOR &&
5957 fcport->port_type != FCT_BROADCAST) {
83548fe2 5958 ql_dbg(ql_dbg_disc, vha, 0x20f0,
726b8548
QT
5959 "%s %d %8phC post del sess\n",
5960 __func__, __LINE__,
5961 fcport->port_name);
d8630bb9 5962 qlt_schedule_sess_for_deletion(fcport);
726b8548
QT
5963 continue;
5964 }
5965 }
5966 }
1da177e4 5967
726b8548
QT
5968 if (fcport->scan_state == QLA_FCPORT_FOUND)
5969 qla24xx_fcport_handle_login(vha, fcport);
5970 }
1da177e4
LT
5971 return (rval);
5972}
5973
f6602f3b
QT
5974/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
5975int
5976qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
5977{
5978 int loop_id = FC_NO_LOOP_ID;
5979 int lid = NPH_MGMT_SERVER - vha->vp_idx;
5980 unsigned long flags;
5981 struct qla_hw_data *ha = vha->hw;
5982
5983 if (vha->vp_idx == 0) {
5984 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
5985 return NPH_MGMT_SERVER;
5986 }
5987
5988 /* pick id from high and work down to low */
5989 spin_lock_irqsave(&ha->vport_slock, flags);
5990 for (; lid > 0; lid--) {
5991 if (!test_bit(lid, vha->hw->loop_id_map)) {
5992 set_bit(lid, vha->hw->loop_id_map);
5993 loop_id = lid;
5994 break;
5995 }
5996 }
5997 spin_unlock_irqrestore(&ha->vport_slock, flags);
5998
5999 return loop_id;
6000}
6001
1da177e4
LT
6002/*
6003 * qla2x00_fabric_login
6004 * Issue fabric login command.
6005 *
6006 * Input:
6007 * ha = adapter block pointer.
6008 * device = pointer to FC device type structure.
6009 *
6010 * Returns:
6011 * 0 - Login successfully
6012 * 1 - Login failed
6013 * 2 - Initiator device
6014 * 3 - Fatal error
6015 */
6016int
e315cd28 6017qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
6018 uint16_t *next_loopid)
6019{
6020 int rval;
6021 int retry;
6022 uint16_t tmp_loopid;
6023 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 6024 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
6025
6026 retry = 0;
6027 tmp_loopid = 0;
6028
6029 for (;;) {
7c3df132
SK
6030 ql_dbg(ql_dbg_disc, vha, 0x2000,
6031 "Trying Fabric Login w/loop id 0x%04x for port "
6032 "%02x%02x%02x.\n",
6033 fcport->loop_id, fcport->d_id.b.domain,
6034 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
6035
6036 /* Login fcport on switch. */
0b91d116 6037 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
6038 fcport->d_id.b.domain, fcport->d_id.b.area,
6039 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
6040 if (rval != QLA_SUCCESS) {
6041 return rval;
6042 }
1da177e4
LT
6043 if (mb[0] == MBS_PORT_ID_USED) {
6044 /*
6045 * Device has another loop ID. The firmware team
0107109e
AV
6046 * recommends the driver perform an implicit login with
6047 * the specified ID again. The ID we just used is save
6048 * here so we return with an ID that can be tried by
6049 * the next login.
1da177e4
LT
6050 */
6051 retry++;
6052 tmp_loopid = fcport->loop_id;
6053 fcport->loop_id = mb[1];
6054
7c3df132
SK
6055 ql_dbg(ql_dbg_disc, vha, 0x2001,
6056 "Fabric Login: port in use - next loop "
6057 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 6058 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 6059 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
6060
6061 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
6062 /*
6063 * Login succeeded.
6064 */
6065 if (retry) {
6066 /* A retry occurred before. */
6067 *next_loopid = tmp_loopid;
6068 } else {
6069 /*
6070 * No retry occurred before. Just increment the
6071 * ID value for next login.
6072 */
6073 *next_loopid = (fcport->loop_id + 1);
6074 }
6075
6076 if (mb[1] & BIT_0) {
6077 fcport->port_type = FCT_INITIATOR;
6078 } else {
6079 fcport->port_type = FCT_TARGET;
6080 if (mb[1] & BIT_1) {
8474f3a0 6081 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
6082 }
6083 }
6084
ad3e0eda
AV
6085 if (mb[10] & BIT_0)
6086 fcport->supported_classes |= FC_COS_CLASS2;
6087 if (mb[10] & BIT_1)
6088 fcport->supported_classes |= FC_COS_CLASS3;
6089
2d70c103
NB
6090 if (IS_FWI2_CAPABLE(ha)) {
6091 if (mb[10] & BIT_7)
6092 fcport->flags |=
6093 FCF_CONF_COMP_SUPPORTED;
6094 }
6095
1da177e4
LT
6096 rval = QLA_SUCCESS;
6097 break;
6098 } else if (mb[0] == MBS_LOOP_ID_USED) {
6099 /*
6100 * Loop ID already used, try next loop ID.
6101 */
6102 fcport->loop_id++;
e315cd28 6103 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
6104 if (rval != QLA_SUCCESS) {
6105 /* Ran out of loop IDs to use */
6106 break;
6107 }
6108 } else if (mb[0] == MBS_COMMAND_ERROR) {
6109 /*
6110 * Firmware possibly timed out during login. If NO
6111 * retries are left to do then the device is declared
6112 * dead.
6113 */
6114 *next_loopid = fcport->loop_id;
e315cd28 6115 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
6116 fcport->d_id.b.domain, fcport->d_id.b.area,
6117 fcport->d_id.b.al_pa);
e315cd28 6118 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
6119
6120 rval = 1;
6121 break;
6122 } else {
6123 /*
6124 * unrecoverable / not handled error
6125 */
7c3df132
SK
6126 ql_dbg(ql_dbg_disc, vha, 0x2002,
6127 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
6128 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
6129 fcport->d_id.b.area, fcport->d_id.b.al_pa,
6130 fcport->loop_id, jiffies);
1da177e4
LT
6131
6132 *next_loopid = fcport->loop_id;
e315cd28 6133 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
6134 fcport->d_id.b.domain, fcport->d_id.b.area,
6135 fcport->d_id.b.al_pa);
5f16b331 6136 qla2x00_clear_loop_id(fcport);
0eedfcf0 6137 fcport->login_retry = 0;
1da177e4
LT
6138
6139 rval = 3;
6140 break;
6141 }
6142 }
6143
6144 return (rval);
6145}
6146
6147/*
6148 * qla2x00_local_device_login
6149 * Issue local device login command.
6150 *
6151 * Input:
6152 * ha = adapter block pointer.
6153 * loop_id = loop id of device to login to.
6154 *
6155 * Returns (Where's the #define!!!!):
6156 * 0 - Login successfully
6157 * 1 - Login failed
6158 * 3 - Fatal error
6159 */
6160int
e315cd28 6161qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
6162{
6163 int rval;
6164 uint16_t mb[MAILBOX_REGISTER_COUNT];
6165
6166 memset(mb, 0, sizeof(mb));
e315cd28 6167 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
6168 if (rval == QLA_SUCCESS) {
6169 /* Interrogate mailbox registers for any errors */
6170 if (mb[0] == MBS_COMMAND_ERROR)
6171 rval = 1;
6172 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6173 /* device not in PCB table */
6174 rval = 3;
6175 }
6176
6177 return (rval);
6178}
6179
6180/*
6181 * qla2x00_loop_resync
6182 * Resync with fibre channel devices.
6183 *
6184 * Input:
6185 * ha = adapter block pointer.
6186 *
6187 * Returns:
6188 * 0 = success
6189 */
6190int
e315cd28 6191qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 6192{
73208dfd 6193 int rval = QLA_SUCCESS;
1da177e4 6194 uint32_t wait_time;
1da177e4 6195
e315cd28
AC
6196 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6197 if (vha->flags.online) {
6198 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
6199 /* Wait at most MAX_TARGET RSCNs for a stable link. */
6200 wait_time = 256;
6201 do {
8ae6d9c7
GM
6202 if (!IS_QLAFX00(vha->hw)) {
6203 /*
6204 * Issue a marker after FW becomes
6205 * ready.
6206 */
9eb9c6dc
QT
6207 qla2x00_marker(vha, vha->hw->base_qpair,
6208 0, 0, MK_SYNC_ALL);
8ae6d9c7
GM
6209 vha->marker_needed = 0;
6210 }
1da177e4
LT
6211
6212 /* Remap devices on Loop. */
e315cd28 6213 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 6214
8ae6d9c7
GM
6215 if (IS_QLAFX00(vha->hw))
6216 qlafx00_configure_devices(vha);
6217 else
6218 qla2x00_configure_loop(vha);
6219
1da177e4 6220 wait_time--;
e315cd28
AC
6221 } while (!atomic_read(&vha->loop_down_timer) &&
6222 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6223 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6224 &vha->dpc_flags)));
1da177e4 6225 }
1da177e4
LT
6226 }
6227
e315cd28 6228 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 6229 return (QLA_FUNCTION_FAILED);
1da177e4 6230
e315cd28 6231 if (rval)
7c3df132
SK
6232 ql_dbg(ql_dbg_disc, vha, 0x206c,
6233 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
6234
6235 return (rval);
6236}
6237
579d12b5
SK
6238/*
6239* qla2x00_perform_loop_resync
6240* Description: This function will set the appropriate flags and call
6241* qla2x00_loop_resync. If successful loop will be resynced
6242* Arguments : scsi_qla_host_t pointer
6243* returm : Success or Failure
6244*/
6245
6246int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6247{
6248 int32_t rval = 0;
6249
6250 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6251 /*Configure the flags so that resync happens properly*/
6252 atomic_set(&ha->loop_down_timer, 0);
6253 if (!(ha->device_flags & DFLG_NO_CABLE)) {
6254 atomic_set(&ha->loop_state, LOOP_UP);
6255 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6256 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6257 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6258
6259 rval = qla2x00_loop_resync(ha);
6260 } else
6261 atomic_set(&ha->loop_state, LOOP_DEAD);
6262
6263 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
6264 }
6265
6266 return rval;
6267}
6268
d97994dc 6269void
67becc00 6270qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc 6271{
6272 fc_port_t *fcport;
feafb7b1
AE
6273 struct scsi_qla_host *vha;
6274 struct qla_hw_data *ha = base_vha->hw;
6275 unsigned long flags;
d97994dc 6276
feafb7b1 6277 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 6278 /* Go with deferred removal of rport references. */
feafb7b1
AE
6279 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
6280 atomic_inc(&vha->vref_count);
6281 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 6282 if (fcport->drport &&
feafb7b1
AE
6283 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
6284 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 6285 qla2x00_rport_del(fcport);
df673274 6286
feafb7b1
AE
6287 spin_lock_irqsave(&ha->vport_slock, flags);
6288 }
6289 }
6290 atomic_dec(&vha->vref_count);
c4a9b538 6291 wake_up(&vha->vref_waitq);
feafb7b1
AE
6292 }
6293 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc 6294}
6295
7d613ac6
SV
6296/* Assumes idc_lock always held on entry */
6297void
6298qla83xx_reset_ownership(scsi_qla_host_t *vha)
6299{
6300 struct qla_hw_data *ha = vha->hw;
6301 uint32_t drv_presence, drv_presence_mask;
6302 uint32_t dev_part_info1, dev_part_info2, class_type;
6303 uint32_t class_type_mask = 0x3;
6304 uint16_t fcoe_other_function = 0xffff, i;
6305
7ec0effd
AD
6306 if (IS_QLA8044(ha)) {
6307 drv_presence = qla8044_rd_direct(vha,
6308 QLA8044_CRB_DRV_ACTIVE_INDEX);
6309 dev_part_info1 = qla8044_rd_direct(vha,
6310 QLA8044_CRB_DEV_PART_INFO_INDEX);
6311 dev_part_info2 = qla8044_rd_direct(vha,
6312 QLA8044_CRB_DEV_PART_INFO2);
6313 } else {
6314 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6315 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6316 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
6317 }
7d613ac6
SV
6318 for (i = 0; i < 8; i++) {
6319 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6320 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6321 (i != ha->portnum)) {
6322 fcoe_other_function = i;
6323 break;
6324 }
6325 }
6326 if (fcoe_other_function == 0xffff) {
6327 for (i = 0; i < 8; i++) {
6328 class_type = ((dev_part_info2 >> (i * 4)) &
6329 class_type_mask);
6330 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6331 ((i + 8) != ha->portnum)) {
6332 fcoe_other_function = i + 8;
6333 break;
6334 }
6335 }
6336 }
6337 /*
6338 * Prepare drv-presence mask based on fcoe functions present.
6339 * However consider only valid physical fcoe function numbers (0-15).
6340 */
6341 drv_presence_mask = ~((1 << (ha->portnum)) |
6342 ((fcoe_other_function == 0xffff) ?
6343 0 : (1 << (fcoe_other_function))));
6344
6345 /* We are the reset owner iff:
6346 * - No other protocol drivers present.
6347 * - This is the lowest among fcoe functions. */
6348 if (!(drv_presence & drv_presence_mask) &&
6349 (ha->portnum < fcoe_other_function)) {
6350 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6351 "This host is Reset owner.\n");
6352 ha->flags.nic_core_reset_owner = 1;
6353 }
6354}
6355
fa492630 6356static int
7d613ac6
SV
6357__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6358{
6359 int rval = QLA_SUCCESS;
6360 struct qla_hw_data *ha = vha->hw;
6361 uint32_t drv_ack;
6362
6363 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6364 if (rval == QLA_SUCCESS) {
6365 drv_ack |= (1 << ha->portnum);
6366 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6367 }
6368
6369 return rval;
6370}
6371
fa492630 6372static int
7d613ac6
SV
6373__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6374{
6375 int rval = QLA_SUCCESS;
6376 struct qla_hw_data *ha = vha->hw;
6377 uint32_t drv_ack;
6378
6379 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6380 if (rval == QLA_SUCCESS) {
6381 drv_ack &= ~(1 << ha->portnum);
6382 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6383 }
6384
6385 return rval;
6386}
6387
fa492630 6388static const char *
7d613ac6
SV
6389qla83xx_dev_state_to_string(uint32_t dev_state)
6390{
6391 switch (dev_state) {
6392 case QLA8XXX_DEV_COLD:
6393 return "COLD/RE-INIT";
6394 case QLA8XXX_DEV_INITIALIZING:
6395 return "INITIALIZING";
6396 case QLA8XXX_DEV_READY:
6397 return "READY";
6398 case QLA8XXX_DEV_NEED_RESET:
6399 return "NEED RESET";
6400 case QLA8XXX_DEV_NEED_QUIESCENT:
6401 return "NEED QUIESCENT";
6402 case QLA8XXX_DEV_FAILED:
6403 return "FAILED";
6404 case QLA8XXX_DEV_QUIESCENT:
6405 return "QUIESCENT";
6406 default:
6407 return "Unknown";
6408 }
6409}
6410
6411/* Assumes idc-lock always held on entry */
6412void
6413qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6414{
6415 struct qla_hw_data *ha = vha->hw;
6416 uint32_t idc_audit_reg = 0, duration_secs = 0;
6417
6418 switch (audit_type) {
6419 case IDC_AUDIT_TIMESTAMP:
6420 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6421 idc_audit_reg = (ha->portnum) |
6422 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6423 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6424 break;
6425
6426 case IDC_AUDIT_COMPLETION:
6427 duration_secs = ((jiffies_to_msecs(jiffies) -
6428 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6429 idc_audit_reg = (ha->portnum) |
6430 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6431 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6432 break;
6433
6434 default:
6435 ql_log(ql_log_warn, vha, 0xb078,
6436 "Invalid audit type specified.\n");
6437 break;
6438 }
6439}
6440
6441/* Assumes idc_lock always held on entry */
fa492630 6442static int
7d613ac6
SV
6443qla83xx_initiating_reset(scsi_qla_host_t *vha)
6444{
6445 struct qla_hw_data *ha = vha->hw;
6446 uint32_t idc_control, dev_state;
6447
6448 __qla83xx_get_idc_control(vha, &idc_control);
6449 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6450 ql_log(ql_log_info, vha, 0xb080,
6451 "NIC Core reset has been disabled. idc-control=0x%x\n",
6452 idc_control);
6453 return QLA_FUNCTION_FAILED;
6454 }
6455
6456 /* Set NEED-RESET iff in READY state and we are the reset-owner */
6457 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6458 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6459 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6460 QLA8XXX_DEV_NEED_RESET);
6461 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6462 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6463 } else {
6464 const char *state = qla83xx_dev_state_to_string(dev_state);
bd432bb5 6465
7d613ac6
SV
6466 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
6467
6468 /* SV: XXX: Is timeout required here? */
6469 /* Wait for IDC state change READY -> NEED_RESET */
6470 while (dev_state == QLA8XXX_DEV_READY) {
6471 qla83xx_idc_unlock(vha, 0);
6472 msleep(200);
6473 qla83xx_idc_lock(vha, 0);
6474 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6475 }
6476 }
6477
6478 /* Send IDC ack by writing to drv-ack register */
6479 __qla83xx_set_drv_ack(vha);
6480
6481 return QLA_SUCCESS;
6482}
6483
6484int
6485__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6486{
6487 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6488}
6489
7d613ac6
SV
6490int
6491__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6492{
6493 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6494}
6495
fa492630 6496static int
7d613ac6
SV
6497qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6498{
6499 uint32_t drv_presence = 0;
6500 struct qla_hw_data *ha = vha->hw;
6501
6502 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6503 if (drv_presence & (1 << ha->portnum))
6504 return QLA_SUCCESS;
6505 else
6506 return QLA_TEST_FAILED;
6507}
6508
6509int
6510qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6511{
6512 int rval = QLA_SUCCESS;
6513 struct qla_hw_data *ha = vha->hw;
6514
6515 ql_dbg(ql_dbg_p3p, vha, 0xb058,
6516 "Entered %s().\n", __func__);
6517
6518 if (vha->device_flags & DFLG_DEV_FAILED) {
6519 ql_log(ql_log_warn, vha, 0xb059,
6520 "Device in unrecoverable FAILED state.\n");
6521 return QLA_FUNCTION_FAILED;
6522 }
6523
6524 qla83xx_idc_lock(vha, 0);
6525
6526 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6527 ql_log(ql_log_warn, vha, 0xb05a,
6528 "Function=0x%x has been removed from IDC participation.\n",
6529 ha->portnum);
6530 rval = QLA_FUNCTION_FAILED;
6531 goto exit;
6532 }
6533
6534 qla83xx_reset_ownership(vha);
6535
6536 rval = qla83xx_initiating_reset(vha);
6537
6538 /*
6539 * Perform reset if we are the reset-owner,
6540 * else wait till IDC state changes to READY/FAILED.
6541 */
6542 if (rval == QLA_SUCCESS) {
6543 rval = qla83xx_idc_state_handler(vha);
6544
6545 if (rval == QLA_SUCCESS)
6546 ha->flags.nic_core_hung = 0;
6547 __qla83xx_clear_drv_ack(vha);
6548 }
6549
6550exit:
6551 qla83xx_idc_unlock(vha, 0);
6552
6553 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
6554
6555 return rval;
6556}
6557
81178772
SK
6558int
6559qla2xxx_mctp_dump(scsi_qla_host_t *vha)
6560{
6561 struct qla_hw_data *ha = vha->hw;
6562 int rval = QLA_FUNCTION_FAILED;
6563
6564 if (!IS_MCTP_CAPABLE(ha)) {
6565 /* This message can be removed from the final version */
6566 ql_log(ql_log_info, vha, 0x506d,
6567 "This board is not MCTP capable\n");
6568 return rval;
6569 }
6570
6571 if (!ha->mctp_dump) {
6572 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
6573 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
6574
6575 if (!ha->mctp_dump) {
6576 ql_log(ql_log_warn, vha, 0x506e,
6577 "Failed to allocate memory for mctp dump\n");
6578 return rval;
6579 }
6580 }
6581
6582#define MCTP_DUMP_STR_ADDR 0x00000000
6583 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
6584 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
6585 if (rval != QLA_SUCCESS) {
6586 ql_log(ql_log_warn, vha, 0x506f,
6587 "Failed to capture mctp dump\n");
6588 } else {
6589 ql_log(ql_log_info, vha, 0x5070,
6590 "Mctp dump capture for host (%ld/%p).\n",
6591 vha->host_no, ha->mctp_dump);
6592 ha->mctp_dumped = 1;
6593 }
6594
409ee0fe 6595 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
6596 ha->flags.nic_core_reset_hdlr_active = 1;
6597 rval = qla83xx_restart_nic_firmware(vha);
6598 if (rval)
6599 /* NIC Core reset failed. */
6600 ql_log(ql_log_warn, vha, 0x5071,
6601 "Failed to restart nic firmware\n");
6602 else
6603 ql_dbg(ql_dbg_p3p, vha, 0xb084,
6604 "Restarted NIC firmware successfully.\n");
6605 ha->flags.nic_core_reset_hdlr_active = 0;
6606 }
6607
6608 return rval;
6609
6610}
6611
579d12b5 6612/*
8fcd6b8b 6613* qla2x00_quiesce_io
579d12b5
SK
6614* Description: This function will block the new I/Os
6615* Its not aborting any I/Os as context
6616* is not destroyed during quiescence
6617* Arguments: scsi_qla_host_t
6618* return : void
6619*/
6620void
8fcd6b8b 6621qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
6622{
6623 struct qla_hw_data *ha = vha->hw;
6624 struct scsi_qla_host *vp;
6625
8fcd6b8b
CD
6626 ql_dbg(ql_dbg_dpc, vha, 0x401d,
6627 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
6628
6629 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
6630 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6631 atomic_set(&vha->loop_state, LOOP_DOWN);
6632 qla2x00_mark_all_devices_lost(vha, 0);
6633 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 6634 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
6635 } else {
6636 if (!atomic_read(&vha->loop_down_timer))
6637 atomic_set(&vha->loop_down_timer,
6638 LOOP_DOWN_TIME);
6639 }
6640 /* Wait for pending cmds to complete */
6641 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
6642}
6643
a9083016
GM
6644void
6645qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
6646{
6647 struct qla_hw_data *ha = vha->hw;
579d12b5 6648 struct scsi_qla_host *vp;
feafb7b1 6649 unsigned long flags;
6aef87be 6650 fc_port_t *fcport;
7c3f8fd1 6651 u16 i;
a9083016 6652
e46ef004
SK
6653 /* For ISP82XX, driver waits for completion of the commands.
6654 * online flag should be set.
6655 */
7ec0effd 6656 if (!(IS_P3P_TYPE(ha)))
e46ef004 6657 vha->flags.online = 0;
a9083016
GM
6658 ha->flags.chip_reset_done = 0;
6659 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 6660 vha->qla_stats.total_isp_aborts++;
a9083016 6661
7c3df132
SK
6662 ql_log(ql_log_info, vha, 0x00af,
6663 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 6664
b2000805 6665 ha->flags.purge_mbox = 1;
e46ef004
SK
6666 /* For ISP82XX, reset_chip is just disabling interrupts.
6667 * Driver waits for the completion of the commands.
6668 * the interrupts need to be enabled.
6669 */
7ec0effd 6670 if (!(IS_P3P_TYPE(ha)))
a9083016
GM
6671 ha->isp_ops->reset_chip(vha);
6672
5d74c87a 6673 ha->link_data_rate = PORT_SPEED_UNKNOWN;
9cd883f0
QT
6674 SAVE_TOPO(ha);
6675 ha->flags.rida_fmt2 = 0;
ec7193e2
QT
6676 ha->flags.n2n_ae = 0;
6677 ha->flags.lip_ae = 0;
6678 ha->current_topology = 0;
6679 ha->flags.fw_started = 0;
6680 ha->flags.fw_init_done = 0;
b2000805
QT
6681 ha->chip_reset++;
6682 ha->base_qpair->chip_reset = ha->chip_reset;
7c3f8fd1
QT
6683 for (i = 0; i < ha->max_qpairs; i++) {
6684 if (ha->queue_pair_map[i])
6685 ha->queue_pair_map[i]->chip_reset =
6686 ha->base_qpair->chip_reset;
6687 }
726b8548 6688
b2000805
QT
6689 /* purge MBox commands */
6690 if (atomic_read(&ha->num_pend_mbx_stage3)) {
6691 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
6692 complete(&ha->mbx_intr_comp);
6693 }
6694
6695 i = 0;
6696 while (atomic_read(&ha->num_pend_mbx_stage3) ||
6697 atomic_read(&ha->num_pend_mbx_stage2) ||
6698 atomic_read(&ha->num_pend_mbx_stage1)) {
6699 msleep(20);
6700 i++;
6701 if (i > 50)
6702 break;
6703 }
6704 ha->flags.purge_mbox = 0;
6705
a9083016
GM
6706 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
6707 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6708 atomic_set(&vha->loop_state, LOOP_DOWN);
6709 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
6710
6711 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 6712 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
6713 atomic_inc(&vp->vref_count);
6714 spin_unlock_irqrestore(&ha->vport_slock, flags);
6715
a9083016 6716 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
6717
6718 spin_lock_irqsave(&ha->vport_slock, flags);
6719 atomic_dec(&vp->vref_count);
6720 }
6721 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
6722 } else {
6723 if (!atomic_read(&vha->loop_down_timer))
6724 atomic_set(&vha->loop_down_timer,
6725 LOOP_DOWN_TIME);
6726 }
6727
6aef87be 6728 /* Clear all async request states across all VPs. */
8b5292bc 6729 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6aef87be 6730 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
8b5292bc
QT
6731 fcport->scan_state = 0;
6732 }
6aef87be
AV
6733 spin_lock_irqsave(&ha->vport_slock, flags);
6734 list_for_each_entry(vp, &ha->vp_list, list) {
6735 atomic_inc(&vp->vref_count);
6736 spin_unlock_irqrestore(&ha->vport_slock, flags);
6737
6738 list_for_each_entry(fcport, &vp->vp_fcports, list)
6739 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6740
6741 spin_lock_irqsave(&ha->vport_slock, flags);
6742 atomic_dec(&vp->vref_count);
6743 }
6744 spin_unlock_irqrestore(&ha->vport_slock, flags);
6745
bddd2d65
LC
6746 if (!ha->flags.eeh_busy) {
6747 /* Make sure for ISP 82XX IO DMA is complete */
7ec0effd 6748 if (IS_P3P_TYPE(ha)) {
7190575f 6749 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
6750 ql_log(ql_log_info, vha, 0x00b4,
6751 "Done chip reset cleanup.\n");
a9083016 6752
e46ef004
SK
6753 /* Done waiting for pending commands.
6754 * Reset the online flag.
6755 */
6756 vha->flags.online = 0;
4d78c973 6757 }
a9083016 6758
bddd2d65
LC
6759 /* Requeue all commands in outstanding command list. */
6760 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6761 }
b6a029e1
AE
6762 /* memory barrier */
6763 wmb();
a9083016
GM
6764}
6765
1da177e4
LT
6766/*
6767* qla2x00_abort_isp
6768* Resets ISP and aborts all outstanding commands.
6769*
6770* Input:
6771* ha = adapter block pointer.
6772*
6773* Returns:
6774* 0 = success
6775*/
6776int
e315cd28 6777qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 6778{
476e8978 6779 int rval;
1da177e4 6780 uint8_t status = 0;
e315cd28
AC
6781 struct qla_hw_data *ha = vha->hw;
6782 struct scsi_qla_host *vp;
73208dfd 6783 struct req_que *req = ha->req_q_map[0];
feafb7b1 6784 unsigned long flags;
1da177e4 6785
e315cd28 6786 if (vha->flags.online) {
a9083016 6787 qla2x00_abort_isp_cleanup(vha);
1da177e4 6788
3f006ac3
MH
6789 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
6790 ha->flags.chip_reset_done = 1;
6791 vha->flags.online = 1;
6792 status = 0;
6793 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6794 return status;
6795 }
6796
a6171297
SV
6797 if (IS_QLA8031(ha)) {
6798 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
6799 "Clearing fcoe driver presence.\n");
6800 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
6801 ql_dbg(ql_dbg_p3p, vha, 0xb073,
6802 "Error while clearing DRV-Presence.\n");
6803 }
6804
85880801
AV
6805 if (unlikely(pci_channel_offline(ha->pdev) &&
6806 ha->flags.pci_channel_io_perm_failure)) {
6807 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6808 status = 0;
6809 return status;
6810 }
6811
0645cb83
QT
6812 switch (vha->qlini_mode) {
6813 case QLA2XXX_INI_MODE_DISABLED:
6814 if (!qla_tgt_mode_enabled(vha))
6815 return 0;
6816 break;
6817 case QLA2XXX_INI_MODE_DUAL:
6818 if (!qla_dual_mode_enabled(vha))
6819 return 0;
6820 break;
6821 case QLA2XXX_INI_MODE_ENABLED:
6822 default:
6823 break;
6824 }
6825
73208dfd 6826 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 6827
e315cd28 6828 ha->isp_ops->nvram_config(vha);
1da177e4 6829
e315cd28
AC
6830 if (!qla2x00_restart_isp(vha)) {
6831 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 6832
e315cd28 6833 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
6834 /*
6835 * Issue marker command only when we are going
6836 * to start the I/O .
6837 */
e315cd28 6838 vha->marker_needed = 1;
1da177e4
LT
6839 }
6840
e315cd28 6841 vha->flags.online = 1;
1da177e4 6842
fd34f556 6843 ha->isp_ops->enable_intrs(ha);
1da177e4 6844
fa2a1ce5 6845 ha->isp_abort_cnt = 0;
e315cd28 6846 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 6847
6246b8a1
GM
6848 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
6849 qla2x00_get_fw_version(vha);
df613b96
AV
6850 if (ha->fce) {
6851 ha->flags.fce_enabled = 1;
6852 memset(ha->fce, 0,
6853 fce_calc_size(ha->fce_bufs));
e315cd28 6854 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
6855 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6856 &ha->fce_bufs);
6857 if (rval) {
7c3df132 6858 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
6859 "Unable to reinitialize FCE "
6860 "(%d).\n", rval);
6861 ha->flags.fce_enabled = 0;
6862 }
6863 }
436a7b11
AV
6864
6865 if (ha->eft) {
6866 memset(ha->eft, 0, EFT_SIZE);
e315cd28 6867 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
6868 ha->eft_dma, EFT_NUM_BUFFERS);
6869 if (rval) {
7c3df132 6870 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
6871 "Unable to reinitialize EFT "
6872 "(%d).\n", rval);
6873 }
6874 }
1da177e4 6875 } else { /* failed the ISP abort */
e315cd28
AC
6876 vha->flags.online = 1;
6877 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 6878 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
6879 ql_log(ql_log_fatal, vha, 0x8035,
6880 "ISP error recover failed - "
6881 "board disabled.\n");
fa2a1ce5 6882 /*
1da177e4
LT
6883 * The next call disables the board
6884 * completely.
6885 */
1e4ac5d6 6886 qla2x00_abort_isp_cleanup(vha);
e315cd28 6887 vha->flags.online = 0;
1da177e4 6888 clear_bit(ISP_ABORT_RETRY,
e315cd28 6889 &vha->dpc_flags);
1da177e4
LT
6890 status = 0;
6891 } else { /* schedule another ISP abort */
6892 ha->isp_abort_cnt--;
7c3df132
SK
6893 ql_dbg(ql_dbg_taskm, vha, 0x8020,
6894 "ISP abort - retry remaining %d.\n",
6895 ha->isp_abort_cnt);
1da177e4
LT
6896 status = 1;
6897 }
6898 } else {
6899 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
6900 ql_dbg(ql_dbg_taskm, vha, 0x8021,
6901 "ISP error recovery - retrying (%d) "
6902 "more times.\n", ha->isp_abort_cnt);
e315cd28 6903 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
6904 status = 1;
6905 }
6906 }
fa2a1ce5 6907
1da177e4
LT
6908 }
6909
e315cd28 6910 if (!status) {
7c3df132 6911 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
1608cc4a 6912 qla2x00_configure_hba(vha);
feafb7b1
AE
6913 spin_lock_irqsave(&ha->vport_slock, flags);
6914 list_for_each_entry(vp, &ha->vp_list, list) {
6915 if (vp->vp_idx) {
6916 atomic_inc(&vp->vref_count);
6917 spin_unlock_irqrestore(&ha->vport_slock, flags);
6918
e315cd28 6919 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
6920
6921 spin_lock_irqsave(&ha->vport_slock, flags);
6922 atomic_dec(&vp->vref_count);
6923 }
e315cd28 6924 }
feafb7b1
AE
6925 spin_unlock_irqrestore(&ha->vport_slock, flags);
6926
7d613ac6
SV
6927 if (IS_QLA8031(ha)) {
6928 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
6929 "Setting back fcoe driver presence.\n");
6930 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
6931 ql_dbg(ql_dbg_p3p, vha, 0xb074,
6932 "Error while setting DRV-Presence.\n");
6933 }
e315cd28 6934 } else {
d8424f68
JP
6935 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
6936 __func__);
1da177e4
LT
6937 }
6938
6939 return(status);
6940}
6941
6942/*
6943* qla2x00_restart_isp
6944* restarts the ISP after a reset
6945*
6946* Input:
6947* ha = adapter block pointer.
6948*
6949* Returns:
6950* 0 = success
6951*/
6952static int
e315cd28 6953qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 6954{
c6b2fca8 6955 int status = 0;
e315cd28 6956 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
6957
6958 /* If firmware needs to be loaded */
e315cd28
AC
6959 if (qla2x00_isp_firmware(vha)) {
6960 vha->flags.online = 0;
6961 status = ha->isp_ops->chip_diag(vha);
6962 if (!status)
6963 status = qla2x00_setup_chip(vha);
1da177e4
LT
6964 }
6965
e315cd28
AC
6966 if (!status && !(status = qla2x00_init_rings(vha))) {
6967 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 6968 ha->flags.chip_reset_done = 1;
7108b76e 6969
73208dfd
AC
6970 /* Initialize the queues in use */
6971 qla25xx_init_queues(ha);
6972
e315cd28
AC
6973 status = qla2x00_fw_ready(vha);
6974 if (!status) {
0107109e 6975 /* Issue a marker after FW becomes ready. */
9eb9c6dc 6976 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7108b76e 6977 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
6978 }
6979
6980 /* if no cable then assume it's good */
e315cd28 6981 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4 6982 status = 0;
1da177e4
LT
6983 }
6984 return (status);
6985}
6986
73208dfd
AC
6987static int
6988qla25xx_init_queues(struct qla_hw_data *ha)
6989{
6990 struct rsp_que *rsp = NULL;
6991 struct req_que *req = NULL;
6992 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6993 int ret = -1;
6994 int i;
6995
2afa19a9 6996 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd 6997 rsp = ha->rsp_q_map[i];
cb43285f 6998 if (rsp && test_bit(i, ha->rsp_qid_map)) {
73208dfd 6999 rsp->options &= ~BIT_0;
618a7523 7000 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 7001 if (ret != QLA_SUCCESS)
7c3df132
SK
7002 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7003 "%s Rsp que: %d init failed.\n",
7004 __func__, rsp->id);
73208dfd 7005 else
7c3df132
SK
7006 ql_dbg(ql_dbg_init, base_vha, 0x0100,
7007 "%s Rsp que: %d inited.\n",
7008 __func__, rsp->id);
73208dfd 7009 }
2afa19a9
AC
7010 }
7011 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd 7012 req = ha->req_q_map[i];
cb43285f
QT
7013 if (req && test_bit(i, ha->req_qid_map)) {
7014 /* Clear outstanding commands array. */
73208dfd 7015 req->options &= ~BIT_0;
618a7523 7016 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 7017 if (ret != QLA_SUCCESS)
7c3df132
SK
7018 ql_dbg(ql_dbg_init, base_vha, 0x0101,
7019 "%s Req que: %d init failed.\n",
7020 __func__, req->id);
73208dfd 7021 else
7c3df132
SK
7022 ql_dbg(ql_dbg_init, base_vha, 0x0102,
7023 "%s Req que: %d inited.\n",
7024 __func__, req->id);
73208dfd
AC
7025 }
7026 }
7027 return ret;
7028}
7029
1da177e4
LT
7030/*
7031* qla2x00_reset_adapter
7032* Reset adapter.
7033*
7034* Input:
7035* ha = adapter block pointer.
7036*/
3f006ac3 7037int
e315cd28 7038qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
7039{
7040 unsigned long flags = 0;
e315cd28 7041 struct qla_hw_data *ha = vha->hw;
3d71644c 7042 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 7043
e315cd28 7044 vha->flags.online = 0;
fd34f556 7045 ha->isp_ops->disable_intrs(ha);
1da177e4 7046
1da177e4
LT
7047 spin_lock_irqsave(&ha->hardware_lock, flags);
7048 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
7049 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
7050 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
7051 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
7052 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3f006ac3
MH
7053
7054 return QLA_SUCCESS;
1da177e4 7055}
0107109e 7056
3f006ac3 7057int
e315cd28 7058qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
7059{
7060 unsigned long flags = 0;
e315cd28 7061 struct qla_hw_data *ha = vha->hw;
0107109e 7062 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3f006ac3 7063 int rval = QLA_SUCCESS;
0107109e 7064
7ec0effd 7065 if (IS_P3P_TYPE(ha))
3f006ac3 7066 return rval;
a9083016 7067
e315cd28 7068 vha->flags.online = 0;
fd34f556 7069 ha->isp_ops->disable_intrs(ha);
0107109e
AV
7070
7071 spin_lock_irqsave(&ha->hardware_lock, flags);
7072 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
7073 RD_REG_DWORD(&reg->hccr);
7074 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
7075 RD_REG_DWORD(&reg->hccr);
7076 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
7077
7078 if (IS_NOPOLLING_TYPE(ha))
7079 ha->isp_ops->enable_intrs(ha);
3f006ac3
MH
7080
7081 return rval;
0107109e
AV
7082}
7083
4e08df3f
DM
7084/* On sparc systems, obtain port and node WWN from firmware
7085 * properties.
7086 */
e315cd28
AC
7087static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7088 struct nvram_24xx *nv)
4e08df3f
DM
7089{
7090#ifdef CONFIG_SPARC
e315cd28 7091 struct qla_hw_data *ha = vha->hw;
4e08df3f 7092 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
7093 struct device_node *dp = pci_device_to_OF_node(pdev);
7094 const u8 *val;
4e08df3f
DM
7095 int len;
7096
7097 val = of_get_property(dp, "port-wwn", &len);
7098 if (val && len >= WWN_SIZE)
7099 memcpy(nv->port_name, val, WWN_SIZE);
7100
7101 val = of_get_property(dp, "node-wwn", &len);
7102 if (val && len >= WWN_SIZE)
7103 memcpy(nv->node_name, val, WWN_SIZE);
7104#endif
7105}
7106
0107109e 7107int
e315cd28 7108qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 7109{
4e08df3f 7110 int rval;
0107109e
AV
7111 struct init_cb_24xx *icb;
7112 struct nvram_24xx *nv;
7113 uint32_t *dptr;
7114 uint8_t *dptr1, *dptr2;
7115 uint32_t chksum;
7116 uint16_t cnt;
e315cd28 7117 struct qla_hw_data *ha = vha->hw;
0107109e 7118
4e08df3f 7119 rval = QLA_SUCCESS;
0107109e 7120 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 7121 nv = ha->nvram;
0107109e
AV
7122
7123 /* Determine NVRAM starting address. */
f73cb695 7124 if (ha->port_no == 0) {
e5b68a61
AC
7125 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7126 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7127 } else {
0107109e 7128 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790 7129 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7130 }
f73cb695 7131
f8f97b0c 7132 ha->nvram_size = sizeof(*nv);
e5b68a61 7133 ha->vpd_size = FA_NVRAM_VPD_SIZE;
0107109e 7134
281afe19
SJ
7135 /* Get VPD data into cache */
7136 ha->vpd = ha->nvram + VPD_OFFSET;
3695310e 7137 ha->isp_ops->read_nvram(vha, ha->vpd,
281afe19
SJ
7138 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7139
7140 /* Get NVRAM data into cache and calculate checksum. */
0107109e 7141 dptr = (uint32_t *)nv;
3695310e 7142 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
da08ef5c
JC
7143 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7144 chksum += le32_to_cpu(*dptr);
0107109e 7145
7c3df132
SK
7146 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7147 "Contents of NVRAM\n");
7148 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
f8f97b0c 7149 nv, ha->nvram_size);
0107109e
AV
7150
7151 /* Bad NVRAM data, set defaults parameters. */
a28d9e4e
JC
7152 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7153 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
0107109e 7154 /* Reset NVRAM data. */
7c3df132 7155 ql_log(ql_log_warn, vha, 0x006b,
3695310e
JC
7156 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7157 chksum, nv->id, nv->nvram_version);
7158 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7c3df132
SK
7159 ql_log(ql_log_warn, vha, 0x006c,
7160 "Falling back to functioning (yet invalid -- WWPN) "
7161 "defaults.\n");
4e08df3f
DM
7162
7163 /*
7164 * Set default initialization control block.
7165 */
7166 memset(nv, 0, ha->nvram_size);
ad950360
BVA
7167 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7168 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 7169 nv->frame_payload_size = 2048;
ad950360
BVA
7170 nv->execution_throttle = cpu_to_le16(0xFFFF);
7171 nv->exchange_count = cpu_to_le16(0);
7172 nv->hard_address = cpu_to_le16(124);
4e08df3f 7173 nv->port_name[0] = 0x21;
f73cb695 7174 nv->port_name[1] = 0x00 + ha->port_no + 1;
4e08df3f
DM
7175 nv->port_name[2] = 0x00;
7176 nv->port_name[3] = 0xe0;
7177 nv->port_name[4] = 0x8b;
7178 nv->port_name[5] = 0x1c;
7179 nv->port_name[6] = 0x55;
7180 nv->port_name[7] = 0x86;
7181 nv->node_name[0] = 0x20;
7182 nv->node_name[1] = 0x00;
7183 nv->node_name[2] = 0x00;
7184 nv->node_name[3] = 0xe0;
7185 nv->node_name[4] = 0x8b;
7186 nv->node_name[5] = 0x1c;
7187 nv->node_name[6] = 0x55;
7188 nv->node_name[7] = 0x86;
e315cd28 7189 qla24xx_nvram_wwn_from_ofw(vha, nv);
ad950360
BVA
7190 nv->login_retry_count = cpu_to_le16(8);
7191 nv->interrupt_delay_timer = cpu_to_le16(0);
7192 nv->login_timeout = cpu_to_le16(0);
4e08df3f 7193 nv->firmware_options_1 =
ad950360
BVA
7194 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7195 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7196 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7197 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7198 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7199 nv->efi_parameters = cpu_to_le32(0);
4e08df3f 7200 nv->reset_delay = 5;
ad950360
BVA
7201 nv->max_luns_per_target = cpu_to_le16(128);
7202 nv->port_down_retry_count = cpu_to_le16(30);
7203 nv->link_down_timeout = cpu_to_le16(30);
4e08df3f
DM
7204
7205 rval = 1;
0107109e
AV
7206 }
7207
726b8548 7208 if (qla_tgt_mode_enabled(vha)) {
2d70c103 7209 /* Don't enable full login after initial LIP */
ad950360 7210 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
2d70c103 7211 /* Don't enable LIP full login for initiator */
ad950360 7212 nv->host_p &= cpu_to_le32(~BIT_10);
2d70c103
NB
7213 }
7214
7215 qlt_24xx_config_nvram_stage1(vha, nv);
7216
0107109e 7217 /* Reset Initialization control block */
e315cd28 7218 memset(icb, 0, ha->init_cb_size);
0107109e
AV
7219
7220 /* Copy 1st segment. */
7221 dptr1 = (uint8_t *)icb;
7222 dptr2 = (uint8_t *)&nv->version;
7223 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7224 while (cnt--)
7225 *dptr1++ = *dptr2++;
7226
7227 icb->login_retry_count = nv->login_retry_count;
3ea66e28 7228 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
7229
7230 /* Copy 2nd segment. */
7231 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7232 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7233 cnt = (uint8_t *)&icb->reserved_3 -
7234 (uint8_t *)&icb->interrupt_delay_timer;
7235 while (cnt--)
7236 *dptr1++ = *dptr2++;
0eaaca4c 7237 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
0107109e
AV
7238 /*
7239 * Setup driver NVRAM options.
7240 */
e315cd28 7241 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 7242 "QLA2462");
0107109e 7243
2d70c103
NB
7244 qlt_24xx_config_nvram_stage2(vha, icb);
7245
ad950360 7246 if (nv->host_p & cpu_to_le32(BIT_15)) {
2d70c103 7247 /* Use alternate WWN? */
5341e868
AV
7248 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7249 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7250 }
7251
0107109e 7252 /* Prepare nodename */
ad950360 7253 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
7254 /*
7255 * Firmware will apply the following mask if the nodename was
7256 * not provided.
7257 */
7258 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7259 icb->node_name[0] &= 0xF0;
7260 }
7261
7262 /* Set host adapter parameters. */
7263 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
7264 ha->flags.enable_lip_reset = 0;
7265 ha->flags.enable_lip_full_login =
58e2753c 7266 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
0c8c39af 7267 ha->flags.enable_target_reset =
58e2753c 7268 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
0107109e 7269 ha->flags.enable_led_scheme = 0;
58e2753c 7270 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
0107109e 7271
fd0e7e4d
AV
7272 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7273 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
7274
7275 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7276 sizeof(ha->fw_seriallink_options24));
7277
7278 /* save HBA serial number */
7279 ha->serial0 = icb->port_name[5];
7280 ha->serial1 = icb->port_name[6];
7281 ha->serial2 = icb->port_name[7];
e315cd28
AC
7282 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7283 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 7284
ad950360 7285 icb->execution_throttle = cpu_to_le16(0xFFFF);
bc8fb3cb 7286
0107109e
AV
7287 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7288
7289 /* Set minimum login_timeout to 4 seconds. */
7290 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7291 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7292 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 7293 nv->login_timeout = cpu_to_le16(4);
0107109e 7294 ha->login_timeout = le16_to_cpu(nv->login_timeout);
0107109e 7295
00a537b8
AV
7296 /* Set minimum RATOV to 100 tenths of a second. */
7297 ha->r_a_tov = 100;
0107109e
AV
7298
7299 ha->loop_reset_delay = nv->reset_delay;
7300
7301 /* Link Down Timeout = 0:
7302 *
7303 * When Port Down timer expires we will start returning
7304 * I/O's to OS with "DID_NO_CONNECT".
7305 *
7306 * Link Down Timeout != 0:
7307 *
7308 * The driver waits for the link to come up after link down
7309 * before returning I/Os to OS with "DID_NO_CONNECT".
7310 */
7311 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7312 ha->loop_down_abort_time =
7313 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7314 } else {
7315 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7316 ha->loop_down_abort_time =
7317 (LOOP_DOWN_TIME - ha->link_down_timeout);
7318 }
7319
7320 /* Need enough time to try and get the port back. */
7321 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7322 if (qlport_down_retry)
7323 ha->port_down_retry_count = qlport_down_retry;
7324
7325 /* Set login_retry_count */
7326 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7327 if (ha->port_down_retry_count ==
7328 le16_to_cpu(nv->port_down_retry_count) &&
7329 ha->port_down_retry_count > 3)
7330 ha->login_retry_count = ha->port_down_retry_count;
7331 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7332 ha->login_retry_count = ha->port_down_retry_count;
7333 if (ql2xloginretrycount)
7334 ha->login_retry_count = ql2xloginretrycount;
7335
8777e431
QT
7336 /* N2N: driver will initiate Login instead of FW */
7337 icb->firmware_options_3 |= BIT_8;
7338
4fdfefe5 7339 /* Enable ZIO. */
e315cd28 7340 if (!vha->flags.init_done) {
4fdfefe5
AV
7341 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7342 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7343 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
58e2753c 7344 le16_to_cpu(icb->interrupt_delay_timer) : 2;
4fdfefe5 7345 }
ad950360 7346 icb->firmware_options_2 &= cpu_to_le32(
4fdfefe5 7347 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4fdfefe5 7348 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 7349 ha->zio_mode = QLA_ZIO_MODE_6;
7350
7c3df132 7351 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
7352 "ZIO mode %d enabled; timer delay (%d us).\n",
7353 ha->zio_mode, ha->zio_timer * 100);
7354
7355 icb->firmware_options_2 |= cpu_to_le32(
7356 (uint32_t)ha->zio_mode);
7357 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4fdfefe5
AV
7358 }
7359
4e08df3f 7360 if (rval) {
7c3df132
SK
7361 ql_log(ql_log_warn, vha, 0x0070,
7362 "NVRAM configuration failed.\n");
4e08df3f
DM
7363 }
7364 return (rval);
0107109e
AV
7365}
7366
5fa8774c
JC
7367static void
7368qla27xx_print_image(struct scsi_qla_host *vha, char *name,
7369 struct qla27xx_image_status *image_status)
7370{
7371 ql_dbg(ql_dbg_init, vha, 0x018b,
7372 "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
7373 name, "status",
7374 image_status->image_status_mask,
7375 le16_to_cpu(image_status->generation),
7376 image_status->ver_major,
7377 image_status->ver_minor,
7378 image_status->bitmap,
7379 le32_to_cpu(image_status->checksum),
7380 le32_to_cpu(image_status->signature));
7381}
7382
7383static bool
7384qla28xx_check_aux_image_status_signature(
7385 struct qla27xx_image_status *image_status)
7386{
7387 ulong signature = le32_to_cpu(image_status->signature);
7388
7389 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
7390}
7391
7392static bool
7393qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7394{
7395 ulong signature = le32_to_cpu(image_status->signature);
7396
7397 return
7398 signature != QLA27XX_IMG_STATUS_SIGN &&
7399 signature != QLA28XX_IMG_STATUS_SIGN;
7400}
7401
7402static ulong
7403qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7404{
7405 uint32_t *p = (void *)image_status;
7406 uint n = sizeof(*image_status) / sizeof(*p);
7407 uint32_t sum = 0;
7408
7409 for ( ; n--; p++)
7410 sum += le32_to_cpup(p);
7411
7412 return sum;
7413}
7414
7415static inline uint
7416qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7417{
7418 return aux->bitmap & bitmask ?
7419 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
7420}
7421
7422static void
7423qla28xx_component_status(
7424 struct active_regions *active_regions, struct qla27xx_image_status *aux)
7425{
7426 active_regions->aux.board_config =
7427 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
7428
7429 active_regions->aux.vpd_nvram =
7430 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
7431
7432 active_regions->aux.npiv_config_0_1 =
7433 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
7434
7435 active_regions->aux.npiv_config_2_3 =
7436 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
7437}
7438
7439static int
7440qla27xx_compare_image_generation(
7441 struct qla27xx_image_status *pri_image_status,
7442 struct qla27xx_image_status *sec_image_status)
7443{
7444 /* calculate generation delta as uint16 (this accounts for wrap) */
7445 int16_t delta =
7446 le16_to_cpu(pri_image_status->generation) -
7447 le16_to_cpu(sec_image_status->generation);
7448
7449 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
7450
7451 return delta;
7452}
7453
7454void
7455qla28xx_get_aux_images(
7456 struct scsi_qla_host *vha, struct active_regions *active_regions)
4243c115 7457{
4243c115 7458 struct qla_hw_data *ha = vha->hw;
5fa8774c
JC
7459 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7460 bool valid_pri_image = false, valid_sec_image = false;
7461 bool active_pri_image = false, active_sec_image = false;
7462
7463 if (!ha->flt_region_aux_img_status_pri) {
7464 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7465 goto check_sec_image;
7466 }
7467
7468 qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
7469 ha->flt_region_aux_img_status_pri,
7470 sizeof(pri_aux_image_status) >> 2);
7471 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7472
7473 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7474 ql_dbg(ql_dbg_init, vha, 0x018b,
7475 "Primary aux image signature (%#x) not valid\n",
7476 le32_to_cpu(pri_aux_image_status.signature));
7477 goto check_sec_image;
7478 }
7479
7480 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7481 ql_dbg(ql_dbg_init, vha, 0x018c,
7482 "Primary aux image checksum failed\n");
7483 goto check_sec_image;
7484 }
7485
7486 valid_pri_image = true;
7487
7488 if (pri_aux_image_status.image_status_mask & 1) {
7489 ql_dbg(ql_dbg_init, vha, 0x018d,
7490 "Primary aux image is active\n");
7491 active_pri_image = true;
7492 }
7493
7494check_sec_image:
7495 if (!ha->flt_region_aux_img_status_sec) {
7496 ql_dbg(ql_dbg_init, vha, 0x018a,
7497 "Secondary aux image not addressed\n");
7498 goto check_valid_image;
7499 }
7500
7501 qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
7502 ha->flt_region_aux_img_status_sec,
7503 sizeof(sec_aux_image_status) >> 2);
7504 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
7505
7506 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
7507 ql_dbg(ql_dbg_init, vha, 0x018b,
7508 "Secondary aux image signature (%#x) not valid\n",
7509 le32_to_cpu(sec_aux_image_status.signature));
7510 goto check_valid_image;
7511 }
7512
7513 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
7514 ql_dbg(ql_dbg_init, vha, 0x018c,
7515 "Secondary aux image checksum failed\n");
7516 goto check_valid_image;
7517 }
4243c115 7518
5fa8774c
JC
7519 valid_sec_image = true;
7520
7521 if (sec_aux_image_status.image_status_mask & 1) {
7522 ql_dbg(ql_dbg_init, vha, 0x018d,
7523 "Secondary aux image is active\n");
7524 active_sec_image = true;
7525 }
7526
7527check_valid_image:
7528 if (valid_pri_image && active_pri_image &&
7529 valid_sec_image && active_sec_image) {
7530 if (qla27xx_compare_image_generation(&pri_aux_image_status,
7531 &sec_aux_image_status) >= 0) {
7532 qla28xx_component_status(active_regions,
7533 &pri_aux_image_status);
7534 } else {
7535 qla28xx_component_status(active_regions,
7536 &sec_aux_image_status);
7537 }
7538 } else if (valid_pri_image && active_pri_image) {
7539 qla28xx_component_status(active_regions, &pri_aux_image_status);
7540 } else if (valid_sec_image && active_sec_image) {
7541 qla28xx_component_status(active_regions, &sec_aux_image_status);
7542 }
7543
7544 ql_dbg(ql_dbg_init, vha, 0x018f,
7545 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
7546 active_regions->aux.board_config,
7547 active_regions->aux.vpd_nvram,
7548 active_regions->aux.npiv_config_0_1,
7549 active_regions->aux.npiv_config_2_3);
7550}
7551
7552void
7553qla27xx_get_active_image(struct scsi_qla_host *vha,
7554 struct active_regions *active_regions)
7555{
7556 struct qla_hw_data *ha = vha->hw;
7557 struct qla27xx_image_status pri_image_status, sec_image_status;
7558 bool valid_pri_image = false, valid_sec_image = false;
7559 bool active_pri_image = false, active_sec_image = false;
4243c115
SC
7560
7561 if (!ha->flt_region_img_status_pri) {
5fa8774c 7562 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
4243c115
SC
7563 goto check_sec_image;
7564 }
7565
5fa8774c
JC
7566 qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
7567 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2);
7568 qla27xx_print_image(vha, "Primary image", &pri_image_status);
4243c115 7569
5fa8774c 7570 if (qla27xx_check_image_status_signature(&pri_image_status)) {
4243c115 7571 ql_dbg(ql_dbg_init, vha, 0x018b,
f8f97b0c
JC
7572 "Primary image signature (%#x) not valid\n",
7573 le32_to_cpu(pri_image_status.signature));
4243c115
SC
7574 goto check_sec_image;
7575 }
7576
5fa8774c
JC
7577 if (qla27xx_image_status_checksum(&pri_image_status)) {
7578 ql_dbg(ql_dbg_init, vha, 0x018c,
7579 "Primary image checksum failed\n");
7580 goto check_sec_image;
7581 }
4243c115 7582
5fa8774c 7583 valid_pri_image = true;
41dc529a 7584
5fa8774c
JC
7585 if (pri_image_status.image_status_mask & 1) {
7586 ql_dbg(ql_dbg_init, vha, 0x018d,
7587 "Primary image is active\n");
7588 active_pri_image = true;
4243c115
SC
7589 }
7590
7591check_sec_image:
7592 if (!ha->flt_region_img_status_sec) {
5fa8774c 7593 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
4243c115
SC
7594 goto check_valid_image;
7595 }
7596
7597 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
5fa8774c
JC
7598 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
7599 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
4243c115 7600
5fa8774c
JC
7601 if (qla27xx_check_image_status_signature(&sec_image_status)) {
7602 ql_dbg(ql_dbg_init, vha, 0x018b,
f8f97b0c
JC
7603 "Secondary image signature (%#x) not valid\n",
7604 le32_to_cpu(sec_image_status.signature));
4243c115
SC
7605 goto check_valid_image;
7606 }
7607
5fa8774c
JC
7608 if (qla27xx_image_status_checksum(&sec_image_status)) {
7609 ql_dbg(ql_dbg_init, vha, 0x018c,
7610 "Secondary image checksum failed\n");
7611 goto check_valid_image;
7612 }
7613
7614 valid_sec_image = true;
7615
7616 if (sec_image_status.image_status_mask & 1) {
7617 ql_dbg(ql_dbg_init, vha, 0x018d,
7618 "Secondary image is active\n");
7619 active_sec_image = true;
4243c115
SC
7620 }
7621
7622check_valid_image:
5fa8774c
JC
7623 if (valid_pri_image && active_pri_image)
7624 active_regions->global = QLA27XX_PRIMARY_IMAGE;
7625
7626 if (valid_sec_image && active_sec_image) {
7627 if (!active_regions->global ||
7628 qla27xx_compare_image_generation(
7629 &pri_image_status, &sec_image_status) < 0) {
7630 active_regions->global = QLA27XX_SECONDARY_IMAGE;
f8f97b0c 7631 }
4243c115
SC
7632 }
7633
5fa8774c
JC
7634 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
7635 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
7636 "default (boot/fw)" :
7637 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
7638 "primary" :
7639 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
7640 "secondary" : "invalid",
7641 active_regions->global);
4243c115
SC
7642}
7643
f8f97b0c
JC
7644bool qla24xx_risc_firmware_invalid(uint32_t *dword)
7645{
7646 return
7647 !(dword[4] | dword[5] | dword[6] | dword[7]) ||
7648 !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
7649}
7650
413975a0 7651static int
cbc8eb67
AV
7652qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
7653 uint32_t faddr)
d1c61909 7654{
a28d9e4e
JC
7655 int rval;
7656 uint templates, segments, fragment;
7657 ulong i;
7658 uint j;
7659 ulong dlen;
7660 uint32_t *dcode;
7661 uint32_t risc_addr, risc_size, risc_attr = 0;
e315cd28 7662 struct qla_hw_data *ha = vha->hw;
73208dfd 7663 struct req_que *req = ha->req_q_map[0];
a28d9e4e 7664 struct fwdt *fwdt = ha->fwdt;
eaac30be 7665
7c3df132 7666 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 7667 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 7668
f8f97b0c
JC
7669 dcode = (void *)req->ring;
7670 qla24xx_read_flash_data(vha, dcode, faddr, 8);
7671 if (qla24xx_risc_firmware_invalid(dcode)) {
7c3df132
SK
7672 ql_log(ql_log_fatal, vha, 0x008c,
7673 "Unable to verify the integrity of flash firmware "
7674 "image.\n");
7675 ql_log(ql_log_fatal, vha, 0x008d,
7676 "Firmware data: %08x %08x %08x %08x.\n",
7677 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
7678
7679 return QLA_FUNCTION_FAILED;
7680 }
7681
a28d9e4e
JC
7682 dcode = (void *)req->ring;
7683 *srisc_addr = 0;
7684 segments = FA_RISC_CODE_SEGMENTS;
7685 for (j = 0; j < segments; j++) {
7686 ql_dbg(ql_dbg_init, vha, 0x008d,
7687 "-> Loading segment %u...\n", j);
7688 qla24xx_read_flash_data(vha, dcode, faddr, 10);
d1c61909 7689 risc_addr = be32_to_cpu(dcode[2]);
d1c61909 7690 risc_size = be32_to_cpu(dcode[3]);
a28d9e4e
JC
7691 if (!*srisc_addr) {
7692 *srisc_addr = risc_addr;
7693 risc_attr = be32_to_cpu(dcode[9]);
7694 }
d1c61909 7695
a28d9e4e
JC
7696 dlen = ha->fw_transfer_size >> 2;
7697 for (fragment = 0; risc_size; fragment++) {
d1c61909
AV
7698 if (dlen > risc_size)
7699 dlen = risc_size;
7700
7c3df132 7701 ql_dbg(ql_dbg_init, vha, 0x008e,
a28d9e4e
JC
7702 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
7703 fragment, risc_addr, faddr, dlen);
e315cd28 7704 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
7705 for (i = 0; i < dlen; i++)
7706 dcode[i] = swab32(dcode[i]);
7707
a28d9e4e 7708 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
d1c61909 7709 if (rval) {
7c3df132 7710 ql_log(ql_log_fatal, vha, 0x008f,
a28d9e4e 7711 "-> Failed load firmware fragment %u.\n",
7c3df132 7712 fragment);
f261f7af 7713 return QLA_FUNCTION_FAILED;
d1c61909
AV
7714 }
7715
7716 faddr += dlen;
7717 risc_addr += dlen;
7718 risc_size -= dlen;
d1c61909 7719 }
d1c61909
AV
7720 }
7721
ecc89f25 7722 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
a28d9e4e 7723 return QLA_SUCCESS;
f73cb695 7724
a28d9e4e
JC
7725 templates = (risc_attr & BIT_9) ? 2 : 1;
7726 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
7727 for (j = 0; j < templates; j++, fwdt++) {
7728 if (fwdt->template)
7729 vfree(fwdt->template);
7730 fwdt->template = NULL;
7731 fwdt->length = 0;
7732
3695310e 7733 dcode = (void *)req->ring;
a28d9e4e
JC
7734 qla24xx_read_flash_data(vha, dcode, faddr, 7);
7735 risc_size = be32_to_cpu(dcode[2]);
7736 ql_dbg(ql_dbg_init, vha, 0x0161,
7737 "-> fwdt%u template array at %#x (%#x dwords)\n",
7738 j, faddr, risc_size);
7739 if (!risc_size || !~risc_size) {
7740 ql_dbg(ql_dbg_init, vha, 0x0162,
7741 "-> fwdt%u failed to read array\n", j);
7742 goto failed;
7743 }
f73cb695 7744
a28d9e4e
JC
7745 /* skip header and ignore checksum */
7746 faddr += 7;
7747 risc_size -= 8;
7748
7749 ql_dbg(ql_dbg_init, vha, 0x0163,
7750 "-> fwdt%u template allocate template %#x words...\n",
7751 j, risc_size);
7752 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
7753 if (!fwdt->template) {
7754 ql_log(ql_log_warn, vha, 0x0164,
7755 "-> fwdt%u failed allocate template.\n", j);
7756 goto failed;
7757 }
f73cb695 7758
a28d9e4e
JC
7759 dcode = fwdt->template;
7760 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
f73cb695 7761
a28d9e4e
JC
7762 if (!qla27xx_fwdt_template_valid(dcode)) {
7763 ql_log(ql_log_warn, vha, 0x0165,
7764 "-> fwdt%u failed template validate\n", j);
7765 goto failed;
7766 }
f73cb695 7767
a28d9e4e
JC
7768 dlen = qla27xx_fwdt_template_size(dcode);
7769 ql_dbg(ql_dbg_init, vha, 0x0166,
7770 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
7771 j, dlen, dlen / sizeof(*dcode));
7772 if (dlen > risc_size * sizeof(*dcode)) {
7773 ql_log(ql_log_warn, vha, 0x0167,
7774 "-> fwdt%u template exceeds array (%-lu bytes)\n",
7775 j, dlen - risc_size * sizeof(*dcode));
7776 goto failed;
7777 }
7778
7779 fwdt->length = dlen;
7780 ql_dbg(ql_dbg_init, vha, 0x0168,
7781 "-> fwdt%u loaded template ok\n", j);
7782
7783 faddr += risc_size + 1;
f73cb695 7784 }
a28d9e4e
JC
7785
7786 return QLA_SUCCESS;
f73cb695 7787
2ff6ae85 7788failed:
a28d9e4e
JC
7789 if (fwdt->template)
7790 vfree(fwdt->template);
7791 fwdt->template = NULL;
7792 fwdt->length = 0;
7793
7794 return QLA_SUCCESS;
d1c61909
AV
7795}
7796
e9454a88 7797#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 7798
0107109e 7799int
e315cd28 7800qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
7801{
7802 int rval;
7803 int i, fragment;
7804 uint16_t *wcode, *fwcode;
7805 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
7806 struct fw_blob *blob;
e315cd28 7807 struct qla_hw_data *ha = vha->hw;
73208dfd 7808 struct req_que *req = ha->req_q_map[0];
5433383e
AV
7809
7810 /* Load firmware blob. */
e315cd28 7811 blob = qla2x00_request_firmware(vha);
5433383e 7812 if (!blob) {
7c3df132 7813 ql_log(ql_log_info, vha, 0x0083,
94bcf830 7814 "Firmware image unavailable.\n");
7c3df132
SK
7815 ql_log(ql_log_info, vha, 0x0084,
7816 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
7817 return QLA_FUNCTION_FAILED;
7818 }
7819
7820 rval = QLA_SUCCESS;
7821
73208dfd 7822 wcode = (uint16_t *)req->ring;
5433383e
AV
7823 *srisc_addr = 0;
7824 fwcode = (uint16_t *)blob->fw->data;
7825 fwclen = 0;
7826
7827 /* Validate firmware image by checking version. */
7828 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132 7829 ql_log(ql_log_fatal, vha, 0x0085,
5b5e0928 7830 "Unable to verify integrity of firmware image (%zd).\n",
5433383e
AV
7831 blob->fw->size);
7832 goto fail_fw_integrity;
7833 }
7834 for (i = 0; i < 4; i++)
7835 wcode[i] = be16_to_cpu(fwcode[i + 4]);
7836 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
7837 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
7838 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
7839 ql_log(ql_log_fatal, vha, 0x0086,
7840 "Unable to verify integrity of firmware image.\n");
7841 ql_log(ql_log_fatal, vha, 0x0087,
7842 "Firmware data: %04x %04x %04x %04x.\n",
7843 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
7844 goto fail_fw_integrity;
7845 }
7846
7847 seg = blob->segs;
7848 while (*seg && rval == QLA_SUCCESS) {
7849 risc_addr = *seg;
7850 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
7851 risc_size = be16_to_cpu(fwcode[3]);
7852
7853 /* Validate firmware image size. */
7854 fwclen += risc_size * sizeof(uint16_t);
7855 if (blob->fw->size < fwclen) {
7c3df132 7856 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 7857 "Unable to verify integrity of firmware image "
5b5e0928 7858 "(%zd).\n", blob->fw->size);
5433383e
AV
7859 goto fail_fw_integrity;
7860 }
7861
7862 fragment = 0;
7863 while (risc_size > 0 && rval == QLA_SUCCESS) {
7864 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
7865 if (wlen > risc_size)
7866 wlen = risc_size;
7c3df132
SK
7867 ql_dbg(ql_dbg_init, vha, 0x0089,
7868 "Loading risc segment@ risc addr %x number of "
7869 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
7870
7871 for (i = 0; i < wlen; i++)
7872 wcode[i] = swab16(fwcode[i]);
7873
73208dfd 7874 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
7875 wlen);
7876 if (rval) {
7c3df132
SK
7877 ql_log(ql_log_fatal, vha, 0x008a,
7878 "Failed to load segment %d of firmware.\n",
7879 fragment);
5433383e
AV
7880 break;
7881 }
7882
7883 fwcode += wlen;
7884 risc_addr += wlen;
7885 risc_size -= wlen;
7886 fragment++;
7887 }
7888
7889 /* Next segment. */
7890 seg++;
7891 }
7892 return rval;
7893
7894fail_fw_integrity:
7895 return QLA_FUNCTION_FAILED;
7896}
7897
eaac30be
AV
7898static int
7899qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
7900{
7901 int rval;
a28d9e4e
JC
7902 uint templates, segments, fragment;
7903 uint32_t *dcode;
7904 ulong dlen;
7905 uint32_t risc_addr, risc_size, risc_attr = 0;
7906 ulong i;
7907 uint j;
5433383e 7908 struct fw_blob *blob;
f8f97b0c 7909 uint32_t *fwcode;
e315cd28 7910 struct qla_hw_data *ha = vha->hw;
73208dfd 7911 struct req_que *req = ha->req_q_map[0];
a28d9e4e
JC
7912 struct fwdt *fwdt = ha->fwdt;
7913
7914 ql_dbg(ql_dbg_init, vha, 0x0090,
7915 "-> FW: Loading via request-firmware.\n");
0107109e 7916
e315cd28 7917 blob = qla2x00_request_firmware(vha);
5433383e 7918 if (!blob) {
a28d9e4e
JC
7919 ql_log(ql_log_warn, vha, 0x0092,
7920 "-> Firmware file not found.\n");
d1c61909 7921
eaac30be 7922 return QLA_FUNCTION_FAILED;
0107109e
AV
7923 }
7924
f8f97b0c 7925 fwcode = (void *)blob->fw->data;
1710ac17 7926 dcode = fwcode;
f8f97b0c 7927 if (qla24xx_risc_firmware_invalid(dcode)) {
7c3df132 7928 ql_log(ql_log_fatal, vha, 0x0093,
5b5e0928 7929 "Unable to verify integrity of firmware image (%zd).\n",
5433383e 7930 blob->fw->size);
7c3df132
SK
7931 ql_log(ql_log_fatal, vha, 0x0095,
7932 "Firmware data: %08x %08x %08x %08x.\n",
7933 dcode[0], dcode[1], dcode[2], dcode[3]);
f73cb695 7934 return QLA_FUNCTION_FAILED;
0107109e
AV
7935 }
7936
a28d9e4e
JC
7937 dcode = (void *)req->ring;
7938 *srisc_addr = 0;
7939 segments = FA_RISC_CODE_SEGMENTS;
7940 for (j = 0; j < segments; j++) {
7941 ql_dbg(ql_dbg_init, vha, 0x0096,
7942 "-> Loading segment %u...\n", j);
0107109e 7943 risc_addr = be32_to_cpu(fwcode[2]);
0107109e
AV
7944 risc_size = be32_to_cpu(fwcode[3]);
7945
a28d9e4e
JC
7946 if (!*srisc_addr) {
7947 *srisc_addr = risc_addr;
7948 risc_attr = be32_to_cpu(fwcode[9]);
0107109e
AV
7949 }
7950
a28d9e4e
JC
7951 dlen = ha->fw_transfer_size >> 2;
7952 for (fragment = 0; risc_size; fragment++) {
0107109e
AV
7953 if (dlen > risc_size)
7954 dlen = risc_size;
7955
7c3df132 7956 ql_dbg(ql_dbg_init, vha, 0x0097,
a28d9e4e
JC
7957 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
7958 fragment, risc_addr,
7959 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
7960 dlen);
0107109e
AV
7961
7962 for (i = 0; i < dlen; i++)
7963 dcode[i] = swab32(fwcode[i]);
7964
a28d9e4e 7965 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
0107109e 7966 if (rval) {
7c3df132 7967 ql_log(ql_log_fatal, vha, 0x0098,
a28d9e4e 7968 "-> Failed load firmware fragment %u.\n",
7c3df132 7969 fragment);
f261f7af 7970 return QLA_FUNCTION_FAILED;
0107109e
AV
7971 }
7972
7973 fwcode += dlen;
7974 risc_addr += dlen;
7975 risc_size -= dlen;
0107109e 7976 }
0107109e 7977 }
f73cb695 7978
ecc89f25 7979 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
a28d9e4e 7980 return QLA_SUCCESS;
f73cb695 7981
a28d9e4e
JC
7982 templates = (risc_attr & BIT_9) ? 2 : 1;
7983 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
7984 for (j = 0; j < templates; j++, fwdt++) {
7985 if (fwdt->template)
7986 vfree(fwdt->template);
7987 fwdt->template = NULL;
7988 fwdt->length = 0;
7989
7990 risc_size = be32_to_cpu(fwcode[2]);
7991 ql_dbg(ql_dbg_init, vha, 0x0171,
7992 "-> fwdt%u template array at %#x (%#x dwords)\n",
7993 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
7994 risc_size);
7995 if (!risc_size || !~risc_size) {
7996 ql_dbg(ql_dbg_init, vha, 0x0172,
7997 "-> fwdt%u failed to read array\n", j);
7998 goto failed;
7999 }
f73cb695 8000
a28d9e4e
JC
8001 /* skip header and ignore checksum */
8002 fwcode += 7;
8003 risc_size -= 8;
8004
8005 ql_dbg(ql_dbg_init, vha, 0x0173,
8006 "-> fwdt%u template allocate template %#x words...\n",
8007 j, risc_size);
8008 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8009 if (!fwdt->template) {
8010 ql_log(ql_log_warn, vha, 0x0174,
8011 "-> fwdt%u failed allocate template.\n", j);
8012 goto failed;
8013 }
f73cb695 8014
a28d9e4e
JC
8015 dcode = fwdt->template;
8016 for (i = 0; i < risc_size; i++)
5fa8774c 8017 dcode[i] = fwcode[i];
f73cb695 8018
a28d9e4e
JC
8019 if (!qla27xx_fwdt_template_valid(dcode)) {
8020 ql_log(ql_log_warn, vha, 0x0175,
8021 "-> fwdt%u failed template validate\n", j);
8022 goto failed;
8023 }
f73cb695 8024
a28d9e4e
JC
8025 dlen = qla27xx_fwdt_template_size(dcode);
8026 ql_dbg(ql_dbg_init, vha, 0x0176,
8027 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8028 j, dlen, dlen / sizeof(*dcode));
8029 if (dlen > risc_size * sizeof(*dcode)) {
8030 ql_log(ql_log_warn, vha, 0x0177,
8031 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8032 j, dlen - risc_size * sizeof(*dcode));
8033 goto failed;
8034 }
8035
8036 fwdt->length = dlen;
8037 ql_dbg(ql_dbg_init, vha, 0x0178,
8038 "-> fwdt%u loaded template ok\n", j);
8039
8040 fwcode += risc_size + 1;
f73cb695 8041 }
a28d9e4e
JC
8042
8043 return QLA_SUCCESS;
f73cb695 8044
2ff6ae85 8045failed:
a28d9e4e
JC
8046 if (fwdt->template)
8047 vfree(fwdt->template);
8048 fwdt->template = NULL;
8049 fwdt->length = 0;
8050
8051 return QLA_SUCCESS;
0107109e 8052}
18c6c127 8053
eaac30be
AV
8054int
8055qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8056{
8057 int rval;
8058
e337d907
AV
8059 if (ql2xfwloadbin == 1)
8060 return qla81xx_load_risc(vha, srisc_addr);
8061
eaac30be
AV
8062 /*
8063 * FW Load priority:
8064 * 1) Firmware via request-firmware interface (.bin file).
8065 * 2) Firmware residing in flash.
8066 */
8067 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8068 if (rval == QLA_SUCCESS)
8069 return rval;
8070
cbc8eb67
AV
8071 return qla24xx_load_risc_flash(vha, srisc_addr,
8072 vha->hw->flt_region_fw);
eaac30be
AV
8073}
8074
8075int
8076qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8077{
8078 int rval;
cbc8eb67 8079 struct qla_hw_data *ha = vha->hw;
5fa8774c 8080 struct active_regions active_regions = { };
eaac30be 8081
e337d907 8082 if (ql2xfwloadbin == 2)
cbc8eb67 8083 goto try_blob_fw;
e337d907 8084
f8f97b0c 8085 /* FW Load priority:
eaac30be
AV
8086 * 1) Firmware residing in flash.
8087 * 2) Firmware via request-firmware interface (.bin file).
f8f97b0c 8088 * 3) Golden-Firmware residing in flash -- (limited operation).
eaac30be 8089 */
f8f97b0c 8090
5fa8774c 8091 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
f8f97b0c
JC
8092 goto try_primary_fw;
8093
5fa8774c
JC
8094 qla27xx_get_active_image(vha, &active_regions);
8095
8096 if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
f8f97b0c
JC
8097 goto try_primary_fw;
8098
8099 ql_dbg(ql_dbg_init, vha, 0x008b,
8100 "Loading secondary firmware image.\n");
8101 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8102 if (!rval)
8103 return rval;
8104
8105try_primary_fw:
8106 ql_dbg(ql_dbg_init, vha, 0x008b,
8107 "Loading primary firmware image.\n");
cbc8eb67 8108 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
f8f97b0c 8109 if (!rval)
eaac30be
AV
8110 return rval;
8111
cbc8eb67
AV
8112try_blob_fw:
8113 rval = qla24xx_load_risc_blob(vha, srisc_addr);
f8f97b0c 8114 if (!rval || !ha->flt_region_gold_fw)
cbc8eb67
AV
8115 return rval;
8116
7c3df132
SK
8117 ql_log(ql_log_info, vha, 0x0099,
8118 "Attempting to fallback to golden firmware.\n");
cbc8eb67 8119 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
f8f97b0c 8120 if (rval)
cbc8eb67
AV
8121 return rval;
8122
f8f97b0c 8123 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
cbc8eb67 8124 ha->flags.running_gold_fw = 1;
cbc8eb67 8125 return rval;
eaac30be
AV
8126}
8127
18c6c127 8128void
e315cd28 8129qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
8130{
8131 int ret, retries;
e315cd28 8132 struct qla_hw_data *ha = vha->hw;
18c6c127 8133
85880801
AV
8134 if (ha->flags.pci_channel_io_perm_failure)
8135 return;
e428924c 8136 if (!IS_FWI2_CAPABLE(ha))
18c6c127 8137 return;
75edf81d
AV
8138 if (!ha->fw_major_version)
8139 return;
ec7193e2
QT
8140 if (!ha->flags.fw_started)
8141 return;
18c6c127 8142
e315cd28 8143 ret = qla2x00_stop_firmware(vha);
7c7f1f29 8144 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 8145 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
8146 ha->isp_ops->reset_chip(vha);
8147 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 8148 continue;
e315cd28 8149 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 8150 continue;
7c3df132
SK
8151 ql_log(ql_log_info, vha, 0x8015,
8152 "Attempting retry of stop-firmware command.\n");
e315cd28 8153 ret = qla2x00_stop_firmware(vha);
18c6c127 8154 }
ec7193e2 8155
4b60c827 8156 QLA_FW_STOPPED(ha);
ec7193e2 8157 ha->flags.fw_init_done = 0;
18c6c127 8158}
2c3dfe3f
SJ
8159
8160int
e315cd28 8161qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
8162{
8163 int rval = QLA_SUCCESS;
0b91d116 8164 int rval2;
2c3dfe3f 8165 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
8166 struct qla_hw_data *ha = vha->hw;
8167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2c3dfe3f 8168
e315cd28 8169 if (!vha->vp_idx)
2c3dfe3f
SJ
8170 return -EINVAL;
8171
e315cd28 8172 rval = qla2x00_fw_ready(base_vha);
67c2e93a 8173
2c3dfe3f 8174 if (rval == QLA_SUCCESS) {
e315cd28 8175 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9eb9c6dc 8176 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
8177 }
8178
e315cd28 8179 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
8180
8181 /* Login to SNS first */
0b91d116
CD
8182 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8183 BIT_1);
8184 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
8185 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
8186 ql_dbg(ql_dbg_init, vha, 0x0120,
8187 "Failed SNS login: loop_id=%x, rval2=%d\n",
8188 NPH_SNS, rval2);
8189 else
8190 ql_dbg(ql_dbg_init, vha, 0x0103,
8191 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
8192 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
8193 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
8194 return (QLA_FUNCTION_FAILED);
8195 }
8196
e315cd28
AC
8197 atomic_set(&vha->loop_down_timer, 0);
8198 atomic_set(&vha->loop_state, LOOP_UP);
8199 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
8200 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
8201 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
8202
8203 return rval;
8204}
4d4df193
HK
8205
8206/* 84XX Support **************************************************************/
8207
8208static LIST_HEAD(qla_cs84xx_list);
8209static DEFINE_MUTEX(qla_cs84xx_mutex);
8210
8211static struct qla_chip_state_84xx *
e315cd28 8212qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
8213{
8214 struct qla_chip_state_84xx *cs84xx;
e315cd28 8215 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
8216
8217 mutex_lock(&qla_cs84xx_mutex);
8218
8219 /* Find any shared 84xx chip. */
8220 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8221 if (cs84xx->bus == ha->pdev->bus) {
8222 kref_get(&cs84xx->kref);
8223 goto done;
8224 }
8225 }
8226
8227 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8228 if (!cs84xx)
8229 goto done;
8230
8231 kref_init(&cs84xx->kref);
8232 spin_lock_init(&cs84xx->access_lock);
8233 mutex_init(&cs84xx->fw_update_mutex);
8234 cs84xx->bus = ha->pdev->bus;
8235
8236 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8237done:
8238 mutex_unlock(&qla_cs84xx_mutex);
8239 return cs84xx;
8240}
8241
8242static void
8243__qla84xx_chip_release(struct kref *kref)
8244{
8245 struct qla_chip_state_84xx *cs84xx =
8246 container_of(kref, struct qla_chip_state_84xx, kref);
8247
8248 mutex_lock(&qla_cs84xx_mutex);
8249 list_del(&cs84xx->list);
8250 mutex_unlock(&qla_cs84xx_mutex);
8251 kfree(cs84xx);
8252}
8253
8254void
e315cd28 8255qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 8256{
e315cd28 8257 struct qla_hw_data *ha = vha->hw;
bd432bb5 8258
4d4df193
HK
8259 if (ha->cs84xx)
8260 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
8261}
8262
8263static int
e315cd28 8264qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
8265{
8266 int rval;
8267 uint16_t status[2];
e315cd28 8268 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
8269
8270 mutex_lock(&ha->cs84xx->fw_update_mutex);
8271
e315cd28 8272 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
8273
8274 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8275
58e2753c 8276 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
4d4df193
HK
8277 QLA_SUCCESS;
8278}
3a03eb79
AV
8279
8280/* 81XX Support **************************************************************/
8281
8282int
8283qla81xx_nvram_config(scsi_qla_host_t *vha)
8284{
8285 int rval;
8286 struct init_cb_81xx *icb;
8287 struct nvram_81xx *nv;
8288 uint32_t *dptr;
8289 uint8_t *dptr1, *dptr2;
8290 uint32_t chksum;
8291 uint16_t cnt;
8292 struct qla_hw_data *ha = vha->hw;
5fa8774c
JC
8293 uint32_t faddr;
8294 struct active_regions active_regions = { };
3a03eb79
AV
8295
8296 rval = QLA_SUCCESS;
8297 icb = (struct init_cb_81xx *)ha->init_cb;
8298 nv = ha->nvram;
8299
8300 /* Determine NVRAM starting address. */
f8f97b0c 8301 ha->nvram_size = sizeof(*nv);
3a03eb79 8302 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7ec0effd
AD
8303 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
8304 ha->vpd_size = FA_VPD_SIZE_82XX;
3a03eb79 8305
3f006ac3 8306 if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
5fa8774c
JC
8307 qla28xx_get_aux_images(vha, &active_regions);
8308
3a03eb79
AV
8309 /* Get VPD data into cache */
8310 ha->vpd = ha->nvram + VPD_OFFSET;
5fa8774c
JC
8311
8312 faddr = ha->flt_region_vpd;
8313 if (IS_QLA28XX(ha)) {
8314 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8315 faddr = ha->flt_region_vpd_sec;
8316 ql_dbg(ql_dbg_init, vha, 0x0110,
8317 "Loading %s nvram image.\n",
8318 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8319 "primary" : "secondary");
8320 }
8321 qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
3a03eb79
AV
8322
8323 /* Get NVRAM data into cache and calculate checksum. */
5fa8774c
JC
8324 faddr = ha->flt_region_nvram;
8325 if (IS_QLA28XX(ha)) {
8326 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8327 faddr = ha->flt_region_nvram_sec;
8328 }
8329 ql_dbg(ql_dbg_init, vha, 0x0110,
8330 "Loading %s nvram image.\n",
8331 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8332 "primary" : "secondary");
8333 qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2);
8334
3d79038f 8335 dptr = (uint32_t *)nv;
da08ef5c
JC
8336 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
8337 chksum += le32_to_cpu(*dptr);
3a03eb79 8338
7c3df132
SK
8339 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
8340 "Contents of NVRAM:\n");
8341 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
f8f97b0c 8342 nv, ha->nvram_size);
3a03eb79
AV
8343
8344 /* Bad NVRAM data, set defaults parameters. */
a28d9e4e
JC
8345 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
8346 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
3a03eb79 8347 /* Reset NVRAM data. */
7c3df132 8348 ql_log(ql_log_info, vha, 0x0073,
3695310e
JC
8349 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
8350 chksum, nv->id, le16_to_cpu(nv->nvram_version));
8351 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
7c3df132
SK
8352 ql_log(ql_log_info, vha, 0x0074,
8353 "Falling back to functioning (yet invalid -- WWPN) "
8354 "defaults.\n");
3a03eb79
AV
8355
8356 /*
8357 * Set default initialization control block.
8358 */
8359 memset(nv, 0, ha->nvram_size);
ad950360
BVA
8360 nv->nvram_version = cpu_to_le16(ICB_VERSION);
8361 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 8362 nv->frame_payload_size = 2048;
ad950360
BVA
8363 nv->execution_throttle = cpu_to_le16(0xFFFF);
8364 nv->exchange_count = cpu_to_le16(0);
3a03eb79 8365 nv->port_name[0] = 0x21;
f73cb695 8366 nv->port_name[1] = 0x00 + ha->port_no + 1;
3a03eb79
AV
8367 nv->port_name[2] = 0x00;
8368 nv->port_name[3] = 0xe0;
8369 nv->port_name[4] = 0x8b;
8370 nv->port_name[5] = 0x1c;
8371 nv->port_name[6] = 0x55;
8372 nv->port_name[7] = 0x86;
8373 nv->node_name[0] = 0x20;
8374 nv->node_name[1] = 0x00;
8375 nv->node_name[2] = 0x00;
8376 nv->node_name[3] = 0xe0;
8377 nv->node_name[4] = 0x8b;
8378 nv->node_name[5] = 0x1c;
8379 nv->node_name[6] = 0x55;
8380 nv->node_name[7] = 0x86;
ad950360
BVA
8381 nv->login_retry_count = cpu_to_le16(8);
8382 nv->interrupt_delay_timer = cpu_to_le16(0);
8383 nv->login_timeout = cpu_to_le16(0);
3a03eb79 8384 nv->firmware_options_1 =
ad950360
BVA
8385 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
8386 nv->firmware_options_2 = cpu_to_le32(2 << 4);
8387 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
8388 nv->firmware_options_3 = cpu_to_le32(2 << 13);
8389 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
8390 nv->efi_parameters = cpu_to_le32(0);
3a03eb79 8391 nv->reset_delay = 5;
ad950360
BVA
8392 nv->max_luns_per_target = cpu_to_le16(128);
8393 nv->port_down_retry_count = cpu_to_le16(30);
8394 nv->link_down_timeout = cpu_to_le16(180);
eeebcc92 8395 nv->enode_mac[0] = 0x00;
6246b8a1
GM
8396 nv->enode_mac[1] = 0xC0;
8397 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
8398 nv->enode_mac[3] = 0x04;
8399 nv->enode_mac[4] = 0x05;
f73cb695 8400 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
8401
8402 rval = 1;
8403 }
8404
9e522cd8
AE
8405 if (IS_T10_PI_CAPABLE(ha))
8406 nv->frame_payload_size &= ~7;
8407
aa230bc5
AE
8408 qlt_81xx_config_nvram_stage1(vha, nv);
8409
3a03eb79 8410 /* Reset Initialization control block */
773120e4 8411 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
8412
8413 /* Copy 1st segment. */
8414 dptr1 = (uint8_t *)icb;
8415 dptr2 = (uint8_t *)&nv->version;
8416 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
8417 while (cnt--)
8418 *dptr1++ = *dptr2++;
8419
8420 icb->login_retry_count = nv->login_retry_count;
8421
8422 /* Copy 2nd segment. */
8423 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
8424 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
8425 cnt = (uint8_t *)&icb->reserved_5 -
8426 (uint8_t *)&icb->interrupt_delay_timer;
8427 while (cnt--)
8428 *dptr1++ = *dptr2++;
8429
8430 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
8431 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
8432 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
8433 icb->enode_mac[0] = 0x00;
8434 icb->enode_mac[1] = 0xC0;
8435 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
8436 icb->enode_mac[3] = 0x04;
8437 icb->enode_mac[4] = 0x05;
f73cb695 8438 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
8439 }
8440
b64b0e8f
AV
8441 /* Use extended-initialization control block. */
8442 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
0eaaca4c 8443 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
3a03eb79
AV
8444 /*
8445 * Setup driver NVRAM options.
8446 */
8447 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 8448 "QLE8XXX");
3a03eb79 8449
aa230bc5
AE
8450 qlt_81xx_config_nvram_stage2(vha, icb);
8451
3a03eb79 8452 /* Use alternate WWN? */
ad950360 8453 if (nv->host_p & cpu_to_le32(BIT_15)) {
3a03eb79
AV
8454 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
8455 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
8456 }
8457
8458 /* Prepare nodename */
ad950360 8459 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
3a03eb79
AV
8460 /*
8461 * Firmware will apply the following mask if the nodename was
8462 * not provided.
8463 */
8464 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
8465 icb->node_name[0] &= 0xF0;
8466 }
8467
8468 /* Set host adapter parameters. */
8469 ha->flags.disable_risc_code_load = 0;
8470 ha->flags.enable_lip_reset = 0;
8471 ha->flags.enable_lip_full_login =
58e2753c 8472 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
3a03eb79 8473 ha->flags.enable_target_reset =
58e2753c 8474 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
3a03eb79 8475 ha->flags.enable_led_scheme = 0;
58e2753c 8476 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
3a03eb79
AV
8477
8478 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8479 (BIT_6 | BIT_5 | BIT_4)) >> 4;
8480
8481 /* save HBA serial number */
8482 ha->serial0 = icb->port_name[5];
8483 ha->serial1 = icb->port_name[6];
8484 ha->serial2 = icb->port_name[7];
8485 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8486 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8487
ad950360 8488 icb->execution_throttle = cpu_to_le16(0xFFFF);
3a03eb79
AV
8489
8490 ha->retry_count = le16_to_cpu(nv->login_retry_count);
8491
8492 /* Set minimum login_timeout to 4 seconds. */
8493 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
8494 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
8495 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 8496 nv->login_timeout = cpu_to_le16(4);
3a03eb79 8497 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3a03eb79
AV
8498
8499 /* Set minimum RATOV to 100 tenths of a second. */
8500 ha->r_a_tov = 100;
8501
8502 ha->loop_reset_delay = nv->reset_delay;
8503
8504 /* Link Down Timeout = 0:
8505 *
7ec0effd 8506 * When Port Down timer expires we will start returning
3a03eb79
AV
8507 * I/O's to OS with "DID_NO_CONNECT".
8508 *
8509 * Link Down Timeout != 0:
8510 *
8511 * The driver waits for the link to come up after link down
8512 * before returning I/Os to OS with "DID_NO_CONNECT".
8513 */
8514 if (le16_to_cpu(nv->link_down_timeout) == 0) {
8515 ha->loop_down_abort_time =
8516 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
8517 } else {
8518 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
8519 ha->loop_down_abort_time =
8520 (LOOP_DOWN_TIME - ha->link_down_timeout);
8521 }
8522
8523 /* Need enough time to try and get the port back. */
8524 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8525 if (qlport_down_retry)
8526 ha->port_down_retry_count = qlport_down_retry;
8527
8528 /* Set login_retry_count */
8529 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8530 if (ha->port_down_retry_count ==
8531 le16_to_cpu(nv->port_down_retry_count) &&
8532 ha->port_down_retry_count > 3)
8533 ha->login_retry_count = ha->port_down_retry_count;
8534 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8535 ha->login_retry_count = ha->port_down_retry_count;
8536 if (ql2xloginretrycount)
8537 ha->login_retry_count = ql2xloginretrycount;
8538
6246b8a1 8539 /* if not running MSI-X we need handshaking on interrupts */
ecc89f25
JC
8540 if (!vha->hw->flags.msix_enabled &&
8541 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
ad950360 8542 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6246b8a1 8543
3a03eb79
AV
8544 /* Enable ZIO. */
8545 if (!vha->flags.init_done) {
8546 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8547 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
8548 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
58e2753c 8549 le16_to_cpu(icb->interrupt_delay_timer) : 2;
3a03eb79 8550 }
ad950360 8551 icb->firmware_options_2 &= cpu_to_le32(
3a03eb79
AV
8552 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
8553 vha->flags.process_response_queue = 0;
8554 if (ha->zio_mode != QLA_ZIO_DISABLED) {
8555 ha->zio_mode = QLA_ZIO_MODE_6;
8556
7c3df132 8557 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 8558 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
8559 ha->zio_mode,
8560 ha->zio_timer * 100);
3a03eb79
AV
8561
8562 icb->firmware_options_2 |= cpu_to_le32(
8563 (uint32_t)ha->zio_mode);
8564 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
8565 vha->flags.process_response_queue = 1;
8566 }
8567
41dc529a 8568 /* enable RIDA Format2 */
48acad09 8569 icb->firmware_options_3 |= BIT_0;
41dc529a 8570
8777e431
QT
8571 /* N2N: driver will initiate Login instead of FW */
8572 icb->firmware_options_3 |= BIT_8;
41dc529a 8573
3a03eb79 8574 if (rval) {
7c3df132
SK
8575 ql_log(ql_log_warn, vha, 0x0076,
8576 "NVRAM configuration failed.\n");
3a03eb79
AV
8577 }
8578 return (rval);
8579}
8580
a9083016
GM
8581int
8582qla82xx_restart_isp(scsi_qla_host_t *vha)
8583{
8584 int status, rval;
a9083016 8585 struct qla_hw_data *ha = vha->hw;
a9083016 8586 struct scsi_qla_host *vp;
feafb7b1 8587 unsigned long flags;
a9083016
GM
8588
8589 status = qla2x00_init_rings(vha);
8590 if (!status) {
8591 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8592 ha->flags.chip_reset_done = 1;
8593
8594 status = qla2x00_fw_ready(vha);
8595 if (!status) {
a9083016 8596 /* Issue a marker after FW becomes ready. */
9eb9c6dc 8597 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
a9083016 8598 vha->flags.online = 1;
7108b76e 8599 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
a9083016
GM
8600 }
8601
8602 /* if no cable then assume it's good */
8603 if ((vha->device_flags & DFLG_NO_CABLE))
8604 status = 0;
a9083016
GM
8605 }
8606
8607 if (!status) {
8608 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8609
8610 if (!atomic_read(&vha->loop_down_timer)) {
8611 /*
8612 * Issue marker command only when we are going
8613 * to start the I/O .
8614 */
8615 vha->marker_needed = 1;
8616 }
8617
a9083016
GM
8618 ha->isp_ops->enable_intrs(ha);
8619
8620 ha->isp_abort_cnt = 0;
8621 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
8622
53296788 8623 /* Update the firmware version */
3173167f 8624 status = qla82xx_check_md_needed(vha);
53296788 8625
a9083016
GM
8626 if (ha->fce) {
8627 ha->flags.fce_enabled = 1;
8628 memset(ha->fce, 0,
8629 fce_calc_size(ha->fce_bufs));
8630 rval = qla2x00_enable_fce_trace(vha,
8631 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
8632 &ha->fce_bufs);
8633 if (rval) {
cfb0919c 8634 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
8635 "Unable to reinitialize FCE (%d).\n",
8636 rval);
a9083016
GM
8637 ha->flags.fce_enabled = 0;
8638 }
8639 }
8640
8641 if (ha->eft) {
8642 memset(ha->eft, 0, EFT_SIZE);
8643 rval = qla2x00_enable_eft_trace(vha,
8644 ha->eft_dma, EFT_NUM_BUFFERS);
8645 if (rval) {
cfb0919c 8646 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
8647 "Unable to reinitialize EFT (%d).\n",
8648 rval);
a9083016
GM
8649 }
8650 }
a9083016
GM
8651 }
8652
8653 if (!status) {
cfb0919c 8654 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 8655 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
8656
8657 spin_lock_irqsave(&ha->vport_slock, flags);
8658 list_for_each_entry(vp, &ha->vp_list, list) {
8659 if (vp->vp_idx) {
8660 atomic_inc(&vp->vref_count);
8661 spin_unlock_irqrestore(&ha->vport_slock, flags);
8662
a9083016 8663 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
8664
8665 spin_lock_irqsave(&ha->vport_slock, flags);
8666 atomic_dec(&vp->vref_count);
8667 }
a9083016 8668 }
feafb7b1
AE
8669 spin_unlock_irqrestore(&ha->vport_slock, flags);
8670
a9083016 8671 } else {
cfb0919c 8672 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 8673 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
8674 }
8675
8676 return status;
8677}
8678
3a03eb79 8679void
ae97c91e 8680qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 8681{
ae97c91e
AV
8682 struct qla_hw_data *ha = vha->hw;
8683
f198cafa
HM
8684 /* Hold status IOCBs until ABTS response received. */
8685 if (ql2xfwholdabts)
8686 ha->fw_options[3] |= BIT_12;
8687
088d09d4
GM
8688 /* Set Retry FLOGI in case of P2P connection */
8689 if (ha->operating_mode == P2P) {
8690 ha->fw_options[2] |= BIT_3;
8691 ql_dbg(ql_dbg_disc, vha, 0x2103,
8692 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
8693 __func__, ha->fw_options[2]);
8694 }
8695
41dc529a
QT
8696 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
8697 if (ql2xmvasynctoatio) {
8698 if (qla_tgt_mode_enabled(vha) ||
8699 qla_dual_mode_enabled(vha))
8700 ha->fw_options[2] |= BIT_11;
8701 else
8702 ha->fw_options[2] &= ~BIT_11;
8703 }
8704
f7e761f5 8705 if (qla_tgt_mode_enabled(vha) ||
2da52737
QT
8706 qla_dual_mode_enabled(vha)) {
8707 /* FW auto send SCSI status during */
8708 ha->fw_options[1] |= BIT_8;
8709 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
8710
8711 /* FW perform Exchange validation */
f7e761f5 8712 ha->fw_options[2] |= BIT_4;
2da52737
QT
8713 } else {
8714 ha->fw_options[1] &= ~BIT_8;
8715 ha->fw_options[10] &= 0x00ff;
8716
f7e761f5 8717 ha->fw_options[2] &= ~BIT_4;
2da52737 8718 }
f7e761f5 8719
41dc529a
QT
8720 if (ql2xetsenable) {
8721 /* Enable ETS Burst. */
8722 memset(ha->fw_options, 0, sizeof(ha->fw_options));
8723 ha->fw_options[2] |= BIT_9;
8724 }
8725
83548fe2
QT
8726 ql_dbg(ql_dbg_init, vha, 0x00e9,
8727 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
8728 __func__, ha->fw_options[1], ha->fw_options[2],
8729 ha->fw_options[3], vha->host->active_mode);
ae97c91e 8730
ae97c91e 8731 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 8732}
09ff701a
SR
8733
8734/*
8735 * qla24xx_get_fcp_prio
8736 * Gets the fcp cmd priority value for the logged in port.
8737 * Looks for a match of the port descriptors within
8738 * each of the fcp prio config entries. If a match is found,
8739 * the tag (priority) value is returned.
8740 *
8741 * Input:
21090cbe 8742 * vha = scsi host structure pointer.
09ff701a
SR
8743 * fcport = port structure pointer.
8744 *
8745 * Return:
6c452a45 8746 * non-zero (if found)
f28a0a96 8747 * -1 (if not found)
09ff701a
SR
8748 *
8749 * Context:
8750 * Kernel context
8751 */
f28a0a96 8752static int
09ff701a
SR
8753qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8754{
8755 int i, entries;
8756 uint8_t pid_match, wwn_match;
f28a0a96 8757 int priority;
09ff701a
SR
8758 uint32_t pid1, pid2;
8759 uint64_t wwn1, wwn2;
8760 struct qla_fcp_prio_entry *pri_entry;
8761 struct qla_hw_data *ha = vha->hw;
8762
8763 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 8764 return -1;
09ff701a 8765
f28a0a96 8766 priority = -1;
09ff701a
SR
8767 entries = ha->fcp_prio_cfg->num_entries;
8768 pri_entry = &ha->fcp_prio_cfg->entry[0];
8769
8770 for (i = 0; i < entries; i++) {
8771 pid_match = wwn_match = 0;
8772
8773 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
8774 pri_entry++;
8775 continue;
8776 }
8777
8778 /* check source pid for a match */
8779 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
8780 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
8781 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
8782 if (pid1 == INVALID_PORT_ID)
8783 pid_match++;
8784 else if (pid1 == pid2)
8785 pid_match++;
8786 }
8787
8788 /* check destination pid for a match */
8789 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
8790 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
8791 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
8792 if (pid1 == INVALID_PORT_ID)
8793 pid_match++;
8794 else if (pid1 == pid2)
8795 pid_match++;
8796 }
8797
8798 /* check source WWN for a match */
8799 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
8800 wwn1 = wwn_to_u64(vha->port_name);
8801 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8802 if (wwn2 == (uint64_t)-1)
8803 wwn_match++;
8804 else if (wwn1 == wwn2)
8805 wwn_match++;
8806 }
8807
8808 /* check destination WWN for a match */
8809 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8810 wwn1 = wwn_to_u64(fcport->port_name);
8811 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8812 if (wwn2 == (uint64_t)-1)
8813 wwn_match++;
8814 else if (wwn1 == wwn2)
8815 wwn_match++;
8816 }
8817
8818 if (pid_match == 2 || wwn_match == 2) {
8819 /* Found a matching entry */
8820 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8821 priority = pri_entry->tag;
8822 break;
8823 }
8824
8825 pri_entry++;
8826 }
8827
8828 return priority;
8829}
8830
8831/*
8832 * qla24xx_update_fcport_fcp_prio
8833 * Activates fcp priority for the logged in fc port
8834 *
8835 * Input:
21090cbe 8836 * vha = scsi host structure pointer.
09ff701a
SR
8837 * fcp = port structure pointer.
8838 *
8839 * Return:
8840 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8841 *
8842 * Context:
8843 * Kernel context.
8844 */
8845int
21090cbe 8846qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
8847{
8848 int ret;
f28a0a96 8849 int priority;
09ff701a
SR
8850 uint16_t mb[5];
8851
21090cbe
MI
8852 if (fcport->port_type != FCT_TARGET ||
8853 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
8854 return QLA_FUNCTION_FAILED;
8855
21090cbe 8856 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
8857 if (priority < 0)
8858 return QLA_FUNCTION_FAILED;
8859
7ec0effd 8860 if (IS_P3P_TYPE(vha->hw)) {
a00f6296
SK
8861 fcport->fcp_prio = priority & 0xf;
8862 return QLA_SUCCESS;
8863 }
8864
21090cbe 8865 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
8866 if (ret == QLA_SUCCESS) {
8867 if (fcport->fcp_prio != priority)
8868 ql_dbg(ql_dbg_user, vha, 0x709e,
8869 "Updated FCP_CMND priority - value=%d loop_id=%d "
8870 "port_id=%02x%02x%02x.\n", priority,
8871 fcport->loop_id, fcport->d_id.b.domain,
8872 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 8873 fcport->fcp_prio = priority & 0xf;
cfb0919c 8874 } else
7c3df132 8875 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
8876 "Unable to update FCP_CMND priority - ret=0x%x for "
8877 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8878 fcport->d_id.b.domain, fcport->d_id.b.area,
8879 fcport->d_id.b.al_pa);
09ff701a
SR
8880 return ret;
8881}
8882
8883/*
8884 * qla24xx_update_all_fcp_prio
8885 * Activates fcp priority for all the logged in ports
8886 *
8887 * Input:
8888 * ha = adapter block pointer.
8889 *
8890 * Return:
8891 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8892 *
8893 * Context:
8894 * Kernel context.
8895 */
8896int
8897qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8898{
8899 int ret;
8900 fc_port_t *fcport;
8901
8902 ret = QLA_FUNCTION_FAILED;
8903 /* We need to set priority for all logged in ports */
8904 list_for_each_entry(fcport, &vha->vp_fcports, list)
8905 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8906
8907 return ret;
8908}
d7459527 8909
82de802a
QT
8910struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8911 int vp_idx, bool startqp)
d7459527
MH
8912{
8913 int rsp_id = 0;
8914 int req_id = 0;
8915 int i;
8916 struct qla_hw_data *ha = vha->hw;
8917 uint16_t qpair_id = 0;
8918 struct qla_qpair *qpair = NULL;
8919 struct qla_msix_entry *msix;
8920
8921 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8922 ql_log(ql_log_warn, vha, 0x00181,
8923 "FW/Driver is not multi-queue capable.\n");
8924 return NULL;
8925 }
8926
c38d1baf 8927 if (ql2xmqsupport || ql2xnvmeenable) {
d7459527
MH
8928 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8929 if (qpair == NULL) {
8930 ql_log(ql_log_warn, vha, 0x0182,
8931 "Failed to allocate memory for queue pair.\n");
8932 return NULL;
8933 }
d7459527
MH
8934
8935 qpair->hw = vha->hw;
25ff6af1 8936 qpair->vha = vha;
82de802a
QT
8937 qpair->qp_lock_ptr = &qpair->qp_lock;
8938 spin_lock_init(&qpair->qp_lock);
af7bb382 8939 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
d7459527
MH
8940
8941 /* Assign available que pair id */
8942 mutex_lock(&ha->mq_lock);
8943 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
b95b9452 8944 if (ha->num_qpairs >= ha->max_qpairs) {
d7459527
MH
8945 mutex_unlock(&ha->mq_lock);
8946 ql_log(ql_log_warn, vha, 0x0183,
8947 "No resources to create additional q pair.\n");
8948 goto fail_qid_map;
8949 }
b95b9452 8950 ha->num_qpairs++;
d7459527
MH
8951 set_bit(qpair_id, ha->qpair_qid_map);
8952 ha->queue_pair_map[qpair_id] = qpair;
8953 qpair->id = qpair_id;
8954 qpair->vp_idx = vp_idx;
e6373f33 8955 qpair->fw_started = ha->flags.fw_started;
e326d22a 8956 INIT_LIST_HEAD(&qpair->hints_list);
7c3f8fd1
QT
8957 qpair->chip_reset = ha->base_qpair->chip_reset;
8958 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8959 qpair->enable_explicit_conf =
8960 ha->base_qpair->enable_explicit_conf;
d7459527
MH
8961
8962 for (i = 0; i < ha->msix_count; i++) {
093df737 8963 msix = &ha->msix_entries[i];
d7459527
MH
8964 if (msix->in_use)
8965 continue;
8966 qpair->msix = msix;
83548fe2 8967 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
d7459527
MH
8968 "Vector %x selected for qpair\n", msix->vector);
8969 break;
8970 }
8971 if (!qpair->msix) {
8972 ql_log(ql_log_warn, vha, 0x0184,
8973 "Out of MSI-X vectors!.\n");
8974 goto fail_msix;
8975 }
8976
8977 qpair->msix->in_use = 1;
8978 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8abfa9e2 8979 qpair->pdev = ha->pdev;
ecc89f25 8980 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
8abfa9e2 8981 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
d7459527
MH
8982
8983 mutex_unlock(&ha->mq_lock);
8984
8985 /* Create response queue first */
82de802a 8986 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
d7459527
MH
8987 if (!rsp_id) {
8988 ql_log(ql_log_warn, vha, 0x0185,
8989 "Failed to create response queue.\n");
8990 goto fail_rsp;
8991 }
8992
8993 qpair->rsp = ha->rsp_q_map[rsp_id];
8994
8995 /* Create request queue */
82de802a
QT
8996 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8997 startqp);
d7459527
MH
8998 if (!req_id) {
8999 ql_log(ql_log_warn, vha, 0x0186,
9000 "Failed to create request queue.\n");
9001 goto fail_req;
9002 }
9003
9004 qpair->req = ha->req_q_map[req_id];
9005 qpair->rsp->req = qpair->req;
82de802a 9006 qpair->rsp->qpair = qpair;
e326d22a
QT
9007 /* init qpair to this cpu. Will adjust at run time. */
9008 qla_cpu_update(qpair, smp_processor_id());
d7459527
MH
9009
9010 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9011 if (ha->fw_attributes & BIT_4)
9012 qpair->difdix_supported = 1;
9013 }
9014
9015 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9016 if (!qpair->srb_mempool) {
83548fe2 9017 ql_log(ql_log_warn, vha, 0xd036,
d7459527
MH
9018 "Failed to create srb mempool for qpair %d\n",
9019 qpair->id);
9020 goto fail_mempool;
9021 }
9022
9023 /* Mark as online */
9024 qpair->online = 1;
9025
9026 if (!vha->flags.qpairs_available)
9027 vha->flags.qpairs_available = 1;
9028
9029 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9030 "Request/Response queue pair created, id %d\n",
9031 qpair->id);
9032 ql_dbg(ql_dbg_init, vha, 0x0187,
9033 "Request/Response queue pair created, id %d\n",
9034 qpair->id);
9035 }
9036 return qpair;
9037
9038fail_mempool:
9039fail_req:
9040 qla25xx_delete_rsp_que(vha, qpair->rsp);
9041fail_rsp:
9042 mutex_lock(&ha->mq_lock);
9043 qpair->msix->in_use = 0;
9044 list_del(&qpair->qp_list_elem);
9045 if (list_empty(&vha->qp_list))
9046 vha->flags.qpairs_available = 0;
9047fail_msix:
9048 ha->queue_pair_map[qpair_id] = NULL;
9049 clear_bit(qpair_id, ha->qpair_qid_map);
b95b9452 9050 ha->num_qpairs--;
d7459527
MH
9051 mutex_unlock(&ha->mq_lock);
9052fail_qid_map:
9053 kfree(qpair);
9054 return NULL;
9055}
9056
9057int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9058{
d65237c7 9059 int ret = QLA_FUNCTION_FAILED;
d7459527
MH
9060 struct qla_hw_data *ha = qpair->hw;
9061
9062 qpair->delete_in_progress = 1;
9063 while (atomic_read(&qpair->ref_count))
9064 msleep(500);
9065
9066 ret = qla25xx_delete_req_que(vha, qpair->req);
9067 if (ret != QLA_SUCCESS)
9068 goto fail;
7867b98d 9069
d7459527
MH
9070 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9071 if (ret != QLA_SUCCESS)
9072 goto fail;
9073
9074 mutex_lock(&ha->mq_lock);
9075 ha->queue_pair_map[qpair->id] = NULL;
9076 clear_bit(qpair->id, ha->qpair_qid_map);
b95b9452 9077 ha->num_qpairs--;
d7459527 9078 list_del(&qpair->qp_list_elem);
d65237c7 9079 if (list_empty(&vha->qp_list)) {
d7459527 9080 vha->flags.qpairs_available = 0;
d65237c7
SC
9081 vha->flags.qpairs_req_created = 0;
9082 vha->flags.qpairs_rsp_created = 0;
9083 }
d7459527
MH
9084 mempool_destroy(qpair->srb_mempool);
9085 kfree(qpair);
9086 mutex_unlock(&ha->mq_lock);
9087
9088 return QLA_SUCCESS;
9089fail:
9090 return ret;
9091}