/* drivers/scsi/qla2xxx/qla_init.c */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
 * QLogic ISP2x00 Hardware Support Function Prototypes.
 */
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_prli_work(struct scsi_qla_host *, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

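/*
 * Generic SRB timer expiry handler: clear the outstanding-command slot
 * under the hardware lock, then invoke the IOCB-specific timeout callback.
 */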
void
qla2x00_sp_timeout(struct timer_list *t)
{
    srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
    struct srb_iocb *iocb;
    struct req_que *req;
    unsigned long flags;
    struct qla_hw_data *ha = sp->vha->hw;

    WARN_ON_ONCE(irqs_disabled());
    spin_lock_irqsave(&ha->hardware_lock, flags);
    req = sp->qpair->req;
    req->outstanding_cmds[sp->handle] = NULL;
    iocb = &sp->u.iocb_cmd;
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    iocb->timeout(sp);
}

void
qla2x00_sp_free(void *ptr)
{
    srb_t *sp = ptr;
    struct srb_iocb *iocb = &sp->u.iocb_cmd;

    del_timer(&iocb->timer);
    qla2x00_rel_sp(sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
    unsigned long tmo;
    struct qla_hw_data *ha = vha->hw;

    /* Firmware should use switch negotiated r_a_tov for timeout. */
    tmo = ha->r_a_tov / 10 * 2;
    if (IS_QLAFX00(ha)) {
        tmo = FX00_DEF_RATOV * 2;
    } else if (!IS_FWI2_CAPABLE(ha)) {
        /*
         * Except for earlier ISPs where the timeout is seeded from the
         * initialization control block.
         */
        tmo = ha->login_timeout;
    }
    return tmo;
}

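/*
 * Timeout handler for asynchronous login/logout/CT/mailbox SRBs: try to
 * abort the IOCB; if the abort cannot be issued, pull the SRB out of the
 * outstanding-command array and complete it with QLA_FUNCTION_TIMEOUT.
 */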
void
qla2x00_async_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    int rc, h;
    unsigned long flags;

    if (fcport) {
        ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
            "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
            sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
    } else {
        pr_info("Async-%s timeout - hdl=%x.\n",
            sp->name, sp->handle);
    }

    switch (sp->type) {
    case SRB_LOGIN_CMD:
        rc = qla24xx_async_abort_cmd(sp, false);
        if (rc) {
            /* Retry as needed. */
            lio->u.logio.data[0] = MBS_COMMAND_ERROR;
            lio->u.logio.data[1] =
                lio->u.logio.flags & SRB_LOGIN_RETRIED ?
                QLA_LOGIO_LOGIN_RETRIED : 0;
            spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
            for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
                h++) {
                if (sp->qpair->req->outstanding_cmds[h] == sp) {
                    sp->qpair->req->outstanding_cmds[h] = NULL;
                    break;
                }
            }
            spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
            sp->done(sp, QLA_FUNCTION_TIMEOUT);
        }
        break;
    case SRB_LOGOUT_CMD:
    case SRB_CT_PTHRU_CMD:
    case SRB_MB_IOCB:
    case SRB_NACK_PLOGI:
    case SRB_NACK_PRLI:
    case SRB_NACK_LOGO:
    case SRB_CTRL_VP:
        rc = qla24xx_async_abort_cmd(sp, false);
        if (rc) {
            spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
            for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
                h++) {
                if (sp->qpair->req->outstanding_cmds[h] == sp) {
                    sp->qpair->req->outstanding_cmds[h] = NULL;
                    break;
                }
            }
            spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
            sp->done(sp, QLA_FUNCTION_TIMEOUT);
        }
        break;
    }
}

static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct scsi_qla_host *vha = sp->vha;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct event_arg ea;

    ql_dbg(ql_dbg_disc, vha, 0x20dd,
        "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

    sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

    if (!test_bit(UNLOADING, &vha->dpc_flags)) {
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_PLOGI_DONE;
        ea.fcport = sp->fcport;
        ea.data[0] = lio->u.logio.data[0];
        ea.data[1] = lio->u.logio.data[1];
        ea.iop[0] = lio->u.logio.iop[0];
        ea.iop[1] = lio->u.logio.iop[1];
        ea.sp = sp;
        qla2x00_fcport_event_handler(vha, &ea);
    }

    sp->free(sp);
}

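/*
 * Compare the remote port's WWPN against the local adapter's WWPN; used in
 * N2N topology to decide which side initiates the login.
 */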
static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
    if (wwn_to_u64(fcport->port_name) <
        wwn_to_u64(fcport->vha->port_name))
        return true;
    else
        return false;
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
    return !fcport_is_smaller(fcport);
}

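/*
 * Issue an asynchronous PLOGI for the given fcport (PRLI-only in N2N
 * topology when the remote port has the bigger WWPN).
 */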
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
    srb_t *sp;
    struct srb_iocb *lio;
    int rval = QLA_FUNCTION_FAILED;

    if (!vha->flags.online)
        goto done;

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    fcport->flags |= FCF_ASYNC_SENT;
    fcport->logout_completed = 0;

    fcport->disc_state = DSC_LOGIN_PEND;
    sp->type = SRB_LOGIN_CMD;
    sp->name = "login";
    sp->gen1 = fcport->rscn_gen;
    sp->gen2 = fcport->login_gen;

    lio = &sp->u.iocb_cmd;
    lio->timeout = qla2x00_async_iocb_timeout;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

    sp->done = qla2x00_async_login_sp_done;
    if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
        lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
    else
        lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

    if (fcport->fc4f_nvme)
        lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

    ql_dbg(ql_dbg_disc, vha, 0x2072,
        "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
        "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
        fcport->login_retry);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        fcport->flags |= FCF_LOGIN_NEEDED;
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        goto done_free_sp;
    }

    return rval;

done_free_sp:
    sp->free(sp);
    fcport->flags &= ~FCF_ASYNC_SENT;
done:
    fcport->flags &= ~FCF_ASYNC_ACTIVE;
    return rval;
}

static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;

    sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
    sp->fcport->login_gen++;
    qlt_logo_completion_handler(sp->fcport, res);
    sp->free(sp);
}

int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    srb_t *sp;
    struct srb_iocb *lio;
    int rval = QLA_FUNCTION_FAILED;

    if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
        return rval;

    fcport->flags |= FCF_ASYNC_SENT;
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    sp->type = SRB_LOGOUT_CMD;
    sp->name = "logout";

    lio = &sp->u.iocb_cmd;
    lio->timeout = qla2x00_async_iocb_timeout;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

    sp->done = qla2x00_async_logout_sp_done;

    ql_dbg(ql_dbg_disc, vha, 0x2070,
        "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
        sp->handle, fcport->loop_id, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa,
        fcport->port_name);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;
    return rval;

done_free_sp:
    sp->free(sp);
done:
    fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
    return rval;
}

void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
    fcport->flags &= ~FCF_ASYNC_ACTIVE;
    /* Don't re-login in target mode */
    if (!fcport->tgt_session)
        qla2x00_mark_device_lost(vha, fcport, 1, 0);
    qlt_logo_completion_handler(fcport, data[0]);
}

static void
qla2x00_async_prlo_sp_done(void *s, int res)
{
    srb_t *sp = (srb_t *)s;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;

    sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
    if (!test_bit(UNLOADING, &vha->dpc_flags))
        qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
            lio->u.logio.data);
    sp->free(sp);
}

int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    srb_t *sp;
    struct srb_iocb *lio;
    int rval;

    rval = QLA_FUNCTION_FAILED;
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    sp->type = SRB_PRLO_CMD;
    sp->name = "prlo";

    lio = &sp->u.iocb_cmd;
    lio->timeout = qla2x00_async_iocb_timeout;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

    sp->done = qla2x00_async_prlo_sp_done;
    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;

    ql_dbg(ql_dbg_disc, vha, 0x2070,
        "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
        sp->handle, fcport->loop_id, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);
    return rval;

done_free_sp:
    sp->free(sp);
done:
    fcport->flags &= ~FCF_ASYNC_ACTIVE;
    return rval;
}

static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
    struct fc_port *fcport = ea->fcport;

    ql_dbg(ql_dbg_disc, vha, 0x20d2,
        "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
        __func__, fcport->port_name, fcport->disc_state,
        fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
        fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

    if (ea->data[0] != MBS_COMMAND_COMPLETE) {
        ql_dbg(ql_dbg_disc, vha, 0x2066,
            "%s %8phC: adisc fail: post delete\n",
            __func__, ea->fcport->port_name);
        /* deleted = 0 & logout_on_delete = force fw cleanup */
        fcport->deleted = 0;
        fcport->logout_on_delete = 1;
        qlt_schedule_sess_for_deletion(ea->fcport);
        return;
    }

    if (ea->fcport->disc_state == DSC_DELETE_PEND)
        return;

    if (ea->sp->gen2 != ea->fcport->login_gen) {
        /* target side must have changed it. */
        ql_dbg(ql_dbg_disc, vha, 0x20d3,
            "%s %8phC generation changed\n",
            __func__, ea->fcport->port_name);
        return;
    } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
        qla_rscn_replay(fcport);
        qlt_schedule_sess_for_deletion(fcport);
        return;
    }

    __qla24xx_handle_gpdb_event(vha, ea);
}

static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    struct qla_work_evt *e;

    e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
    if (!e)
        return QLA_FUNCTION_FAILED;

    e->u.fcport.fcport = fcport;
    fcport->flags |= FCF_ASYNC_ACTIVE;
    return qla2x00_post_work(vha, e);
}

static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct scsi_qla_host *vha = sp->vha;
    struct event_arg ea;
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    ql_dbg(ql_dbg_disc, vha, 0x2066,
        "Async done-%s res %x %8phC\n",
        sp->name, res, sp->fcport->port_name);

    sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

    memset(&ea, 0, sizeof(ea));
    ea.event = FCME_ADISC_DONE;
    ea.rc = res;
    ea.data[0] = lio->u.logio.data[0];
    ea.data[1] = lio->u.logio.data[1];
    ea.iop[0] = lio->u.logio.iop[0];
    ea.iop[1] = lio->u.logio.iop[1];
    ea.fcport = sp->fcport;
    ea.sp = sp;

    qla2x00_fcport_event_handler(vha, &ea);

    sp->free(sp);
}

int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
    srb_t *sp;
    struct srb_iocb *lio;
    int rval;

    rval = QLA_FUNCTION_FAILED;
    fcport->flags |= FCF_ASYNC_SENT;
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    sp->type = SRB_ADISC_CMD;
    sp->name = "adisc";

    lio = &sp->u.iocb_cmd;
    lio->timeout = qla2x00_async_iocb_timeout;
    sp->gen1 = fcport->rscn_gen;
    sp->gen2 = fcport->login_gen;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

    sp->done = qla2x00_async_adisc_sp_done;
    if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
        lio->u.logio.flags |= SRB_LOGIN_RETRIED;

    ql_dbg(ql_dbg_disc, vha, 0x206f,
        "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
        sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;

    return rval;

done_free_sp:
    sp->free(sp);
done:
    fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
    qla2x00_post_async_adisc_work(vha, fcport, data);
    return rval;
}

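/*
 * Process a completed Get Name List: match the fcport against the entries
 * returned by firmware, resolve loop ID / N_Port ID conflicts, and move the
 * port to the next discovery step.
 */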
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
    struct event_arg *ea)
{
    fc_port_t *fcport, *conflict_fcport;
    struct get_name_list_extended *e;
    u16 i, n, found = 0, loop_id;
    port_id_t id;
    u64 wwn;
    u16 data[2];
    u8 current_login_state;

    fcport = ea->fcport;
    ql_dbg(ql_dbg_disc, vha, 0xffff,
        "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
        __func__, fcport->port_name, fcport->disc_state,
        fcport->fw_login_state, ea->rc,
        fcport->login_gen, fcport->last_login_gen,
        fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

    if (fcport->disc_state == DSC_DELETE_PEND)
        return;

    if (ea->rc) { /* rval */
        if (fcport->login_retry == 0) {
            ql_dbg(ql_dbg_disc, vha, 0x20de,
                "GNL failed Port login retry %8phN, retry cnt=%d.\n",
                fcport->port_name, fcport->login_retry);
        }
        return;
    }

    if (fcport->last_rscn_gen != fcport->rscn_gen) {
        qla_rscn_replay(fcport);
        qlt_schedule_sess_for_deletion(fcport);
        return;
    } else if (fcport->last_login_gen != fcport->login_gen) {
        ql_dbg(ql_dbg_disc, vha, 0x20e0,
            "%s %8phC login gen changed\n",
            __func__, fcport->port_name);
        return;
    }

    n = ea->data[0] / sizeof(struct get_name_list_extended);

    ql_dbg(ql_dbg_disc, vha, 0x20e1,
        "%s %d %8phC n %d %02x%02x%02x lid %d \n",
        __func__, __LINE__, fcport->port_name, n,
        fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa, fcport->loop_id);

    for (i = 0; i < n; i++) {
        e = &vha->gnl.l[i];
        wwn = wwn_to_u64(e->port_name);
        id.b.domain = e->port_id[2];
        id.b.area = e->port_id[1];
        id.b.al_pa = e->port_id[0];
        id.b.rsvd_1 = 0;

        if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
            continue;

        if (IS_SW_RESV_ADDR(id))
            continue;

        found = 1;

        loop_id = le16_to_cpu(e->nport_handle);
        loop_id = (loop_id & 0x7fff);
        if (fcport->fc4f_nvme)
            current_login_state = e->current_login_state >> 4;
        else
            current_login_state = e->current_login_state & 0xf;

        ql_dbg(ql_dbg_disc, vha, 0x20e2,
            "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
            __func__, fcport->port_name,
            e->current_login_state, fcport->fw_login_state,
            fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

        switch (fcport->disc_state) {
        case DSC_DELETE_PEND:
        case DSC_DELETED:
            break;
        default:
            if ((id.b24 != fcport->d_id.b24 &&
                fcport->d_id.b24) ||
                (fcport->loop_id != FC_NO_LOOP_ID &&
                fcport->loop_id != loop_id)) {
                ql_dbg(ql_dbg_disc, vha, 0x20e3,
                    "%s %d %8phC post del sess\n",
                    __func__, __LINE__, fcport->port_name);
                qlt_schedule_sess_for_deletion(fcport);
                return;
            }
            break;
        }

        fcport->loop_id = loop_id;

        wwn = wwn_to_u64(fcport->port_name);
        qlt_find_sess_invalidate_other(vha, wwn,
            id, loop_id, &conflict_fcport);

        if (conflict_fcport) {
            /*
             * Another share fcport share the same loop_id &
             * nport id. Conflict fcport needs to finish
             * cleanup before this fcport can proceed to login.
             */
            conflict_fcport->conflict = fcport;
            fcport->login_pause = 1;
        }

        switch (vha->hw->current_topology) {
        default:
            switch (current_login_state) {
            case DSC_LS_PRLI_COMP:
                ql_dbg(ql_dbg_disc + ql_dbg_verbose,
                    vha, 0x20e4, "%s %d %8phC post gpdb\n",
                    __func__, __LINE__, fcport->port_name);

                if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
                    fcport->port_type = FCT_INITIATOR;
                else
                    fcport->port_type = FCT_TARGET;
                data[0] = data[1] = 0;
                qla2x00_post_async_adisc_work(vha, fcport,
                    data);
                break;
            case DSC_LS_PORT_UNAVAIL:
            default:
                if (fcport->loop_id != FC_NO_LOOP_ID)
                    qla2x00_clear_loop_id(fcport);

                fcport->loop_id = loop_id;
                fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
                qla24xx_fcport_handle_login(vha, fcport);
                break;
            }
            break;
        case ISP_CFG_N:
            fcport->fw_login_state = current_login_state;
            fcport->d_id = id;
            switch (current_login_state) {
            case DSC_LS_PRLI_COMP:
                if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
                    fcport->port_type = FCT_INITIATOR;
                else
                    fcport->port_type = FCT_TARGET;

                data[0] = data[1] = 0;
                qla2x00_post_async_adisc_work(vha, fcport,
                    data);
                break;
            case DSC_LS_PLOGI_COMP:
                if (fcport_is_bigger(fcport)) {
                    /* local adapter is smaller */
                    if (fcport->loop_id != FC_NO_LOOP_ID)
                        qla2x00_clear_loop_id(fcport);

                    fcport->loop_id = loop_id;
                    qla24xx_fcport_handle_login(vha,
                        fcport);
                    break;
                }
                /* fall through */
            default:
                if (fcport_is_smaller(fcport)) {
                    /* local adapter is bigger */
                    if (fcport->loop_id != FC_NO_LOOP_ID)
                        qla2x00_clear_loop_id(fcport);

                    fcport->loop_id = loop_id;
                    qla24xx_fcport_handle_login(vha,
                        fcport);
                }
                break;
            }
            break;
        } /* switch (ha->current_topology) */
    }

    if (!found) {
        switch (vha->hw->current_topology) {
        case ISP_CFG_F:
        case ISP_CFG_FL:
            for (i = 0; i < n; i++) {
                e = &vha->gnl.l[i];
                id.b.domain = e->port_id[0];
                id.b.area = e->port_id[1];
                id.b.al_pa = e->port_id[2];
                id.b.rsvd_1 = 0;
                loop_id = le16_to_cpu(e->nport_handle);

                if (fcport->d_id.b24 == id.b24) {
                    conflict_fcport =
                        qla2x00_find_fcport_by_wwpn(vha,
                            e->port_name, 0);
                    if (conflict_fcport) {
                        ql_dbg(ql_dbg_disc + ql_dbg_verbose,
                            vha, 0x20e5,
                            "%s %d %8phC post del sess\n",
                            __func__, __LINE__,
                            conflict_fcport->port_name);
                        qlt_schedule_sess_for_deletion(conflict_fcport);
                    }
                }
                /*
                 * FW already picked this loop id for
                 * another fcport
                 */
                if (fcport->loop_id == loop_id)
                    fcport->loop_id = FC_NO_LOOP_ID;
            }
            qla24xx_fcport_handle_login(vha, fcport);
            break;
        case ISP_CFG_N:
            fcport->disc_state = DSC_DELETED;
            if (time_after_eq(jiffies, fcport->dm_login_expire)) {
                if (fcport->n2n_link_reset_cnt < 2) {
                    fcport->n2n_link_reset_cnt++;
                    /*
                     * remote port is not sending PLOGI.
                     * Reset link to kick start his state
                     * machine
                     */
                    set_bit(N2N_LINK_RESET,
                        &vha->dpc_flags);
                } else {
                    if (fcport->n2n_chip_reset < 1) {
                        ql_log(ql_log_info, vha, 0x705d,
                            "Chip reset to bring laser down");
                        set_bit(ISP_ABORT_NEEDED,
                            &vha->dpc_flags);
                        fcport->n2n_chip_reset++;
                    } else {
                        ql_log(ql_log_info, vha, 0x705d,
                            "Remote port %8ph is not coming back\n",
                            fcport->port_name);
                        fcport->scan_state = 0;
                    }
                }
                qla2xxx_wake_dpc(vha);
            } else {
                /*
                 * report port suppose to do PLOGI. Give him
                 * more time. FW will catch it.
                 */
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
            }
            break;
        default:
            break;
        }
    }
} /* gnl_event */

static void
qla24xx_async_gnl_sp_done(void *s, int res)
{
    struct srb *sp = s;
    struct scsi_qla_host *vha = sp->vha;
    unsigned long flags;
    struct fc_port *fcport = NULL, *tf;
    u16 i, n = 0, loop_id;
    struct event_arg ea;
    struct get_name_list_extended *e;
    u64 wwn;
    struct list_head h;
    bool found = false;

    ql_dbg(ql_dbg_disc, vha, 0x20e7,
        "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
        sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
        sp->u.iocb_cmd.u.mbx.in_mb[2]);

    if (res == QLA_FUNCTION_TIMEOUT)
        return;

    sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
    memset(&ea, 0, sizeof(ea));
    ea.sp = sp;
    ea.rc = res;
    ea.event = FCME_GNL_DONE;

    if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
        sizeof(struct get_name_list_extended)) {
        n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
            sizeof(struct get_name_list_extended);
        ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
    }

    for (i = 0; i < n; i++) {
        e = &vha->gnl.l[i];
        loop_id = le16_to_cpu(e->nport_handle);
        /* mask out reserve bit */
        loop_id = (loop_id & 0x7fff);
        set_bit(loop_id, vha->hw->loop_id_map);
        wwn = wwn_to_u64(e->port_name);

        ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
            "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
            __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
            e->port_id[0], e->current_login_state, e->last_login_state,
            (loop_id & 0x7fff));
    }

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

    INIT_LIST_HEAD(&h);
    fcport = tf = NULL;
    if (!list_empty(&vha->gnl.fcports))
        list_splice_init(&vha->gnl.fcports, &h);
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

    list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
        list_del_init(&fcport->gnl_entry);
        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        ea.fcport = fcport;

        qla2x00_fcport_event_handler(vha, &ea);
    }

    /* create new fcport if fw has knowledge of new sessions */
    for (i = 0; i < n; i++) {
        port_id_t id;
        u64 wwnn;

        e = &vha->gnl.l[i];
        wwn = wwn_to_u64(e->port_name);

        found = false;
        list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
            if (!memcmp((u8 *)&wwn, fcport->port_name,
                WWN_SIZE)) {
                found = true;
                break;
            }
        }

        id.b.domain = e->port_id[2];
        id.b.area = e->port_id[1];
        id.b.al_pa = e->port_id[0];
        id.b.rsvd_1 = 0;

        if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
            ql_dbg(ql_dbg_disc, vha, 0x2065,
                "%s %d %8phC %06x post new sess\n",
                __func__, __LINE__, (u8 *)&wwn, id.b24);
            wwnn = wwn_to_u64(e->node_name);
            qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
                (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
        }
    }

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    vha->gnl.sent = 0;
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

    sp->free(sp);
}

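/*
 * Issue the MBC_PORT_NODE_NAME_LIST mailbox IOCB. Requests are coalesced:
 * while one list request is in flight, additional fcports are queued on
 * vha->gnl.fcports and served by the same completion.
 */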
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    srb_t *sp;
    struct srb_iocb *mbx;
    int rval = QLA_FUNCTION_FAILED;
    unsigned long flags;
    u16 *mb;

    if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
        return rval;

    ql_dbg(ql_dbg_disc, vha, 0x20d9,
        "Async-gnlist WWPN %8phC \n", fcport->port_name);

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    fcport->flags |= FCF_ASYNC_SENT;
    fcport->disc_state = DSC_GNL;
    fcport->last_rscn_gen = fcport->rscn_gen;
    fcport->last_login_gen = fcport->login_gen;

    list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
    if (vha->gnl.sent) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        return QLA_SUCCESS;
    }
    vha->gnl.sent = 1;
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    sp->type = SRB_MB_IOCB;
    sp->name = "gnlist";
    sp->gen1 = fcport->rscn_gen;
    sp->gen2 = fcport->login_gen;

    mbx = &sp->u.iocb_cmd;
    mbx->timeout = qla2x00_async_iocb_timeout;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

    mb = sp->u.iocb_cmd.u.mbx.out_mb;
    mb[0] = MBC_PORT_NODE_NAME_LIST;
    mb[1] = BIT_2 | BIT_3;
    mb[2] = MSW(vha->gnl.ldma);
    mb[3] = LSW(vha->gnl.ldma);
    mb[6] = MSW(MSD(vha->gnl.ldma));
    mb[7] = LSW(MSD(vha->gnl.ldma));
    mb[8] = vha->gnl.size;
    mb[9] = vha->vp_idx;

    sp->done = qla24xx_async_gnl_sp_done;

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;

    ql_dbg(ql_dbg_disc, vha, 0x20da,
        "Async-%s - OUT WWPN %8phC hndl %x\n",
        sp->name, fcport->port_name, sp->handle);

    return rval;

done_free_sp:
    sp->free(sp);
    fcport->flags &= ~FCF_ASYNC_SENT;
done:
    return rval;
}

int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    struct qla_work_evt *e;

    e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
    if (!e)
        return QLA_FUNCTION_FAILED;

    e->u.fcport.fcport = fcport;
    fcport->flags |= FCF_ASYNC_ACTIVE;
    return qla2x00_post_work(vha, e);
}

static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
    struct srb *sp = s;
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    fc_port_t *fcport = sp->fcport;
    u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
    struct event_arg ea;

    ql_dbg(ql_dbg_disc, vha, 0x20db,
        "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
        sp->name, res, fcport->port_name, mb[1], mb[2]);

    if (res == QLA_FUNCTION_TIMEOUT) {
        dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
            sp->u.iocb_cmd.u.mbx.in_dma);
        return;
    }

    fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
    memset(&ea, 0, sizeof(ea));
    ea.event = FCME_GPDB_DONE;
    ea.fcport = fcport;
    ea.sp = sp;

    qla2x00_fcport_event_handler(vha, &ea);

    dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
        sp->u.iocb_cmd.u.mbx.in_dma);

    sp->free(sp);
}

static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    struct qla_work_evt *e;

    e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
    if (!e)
        return QLA_FUNCTION_FAILED;

    e->u.fcport.fcport = fcport;

    return qla2x00_post_work(vha, e);
}

static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct scsi_qla_host *vha = sp->vha;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct event_arg ea;

    ql_dbg(ql_dbg_disc, vha, 0x2129,
        "%s %8phC res %d \n", __func__,
        sp->fcport->port_name, res);

    sp->fcport->flags &= ~FCF_ASYNC_SENT;

    if (!test_bit(UNLOADING, &vha->dpc_flags)) {
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_PRLI_DONE;
        ea.fcport = sp->fcport;
        ea.data[0] = lio->u.logio.data[0];
        ea.data[1] = lio->u.logio.data[1];
        ea.iop[0] = lio->u.logio.iop[0];
        ea.iop[1] = lio->u.logio.iop[1];
        ea.sp = sp;

        qla2x00_fcport_event_handler(vha, &ea);
    }

    sp->free(sp);
}

int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    srb_t *sp;
    struct srb_iocb *lio;
    int rval = QLA_FUNCTION_FAILED;

    if (!vha->flags.online)
        return rval;

    if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
        fcport->fw_login_state == DSC_LS_PRLI_PEND)
        return rval;

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        return rval;

    fcport->flags |= FCF_ASYNC_SENT;
    fcport->logout_completed = 0;

    sp->type = SRB_PRLI_CMD;
    sp->name = "prli";

    lio = &sp->u.iocb_cmd;
    lio->timeout = qla2x00_async_iocb_timeout;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

    sp->done = qla2x00_async_prli_sp_done;
    lio->u.logio.flags = 0;

    if (fcport->fc4f_nvme)
        lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        fcport->flags |= FCF_LOGIN_NEEDED;
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        goto done_free_sp;
    }

    ql_dbg(ql_dbg_disc, vha, 0x211b,
        "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
        fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
        fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");

    return rval;

done_free_sp:
    sp->free(sp);
    fcport->flags &= ~FCF_ASYNC_SENT;
    return rval;
}

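/*
 * Fetch the firmware port database for an fcport via MBC_GET_PORT_DATABASE;
 * the completion handler raises an FCME_GPDB_DONE event.
 */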
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
    struct qla_work_evt *e;

    e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
    if (!e)
        return QLA_FUNCTION_FAILED;

    e->u.fcport.fcport = fcport;
    e->u.fcport.opt = opt;
    fcport->flags |= FCF_ASYNC_ACTIVE;
    return qla2x00_post_work(vha, e);
}

int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
    srb_t *sp;
    struct srb_iocb *mbx;
    int rval = QLA_FUNCTION_FAILED;
    u16 *mb;
    dma_addr_t pd_dma;
    struct port_database_24xx *pd;
    struct qla_hw_data *ha = vha->hw;

    if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
        return rval;

    fcport->disc_state = DSC_GPDB;

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    fcport->flags |= FCF_ASYNC_SENT;
    sp->type = SRB_MB_IOCB;
    sp->name = "gpdb";
    sp->gen1 = fcport->rscn_gen;
    sp->gen2 = fcport->login_gen;

    mbx = &sp->u.iocb_cmd;
    mbx->timeout = qla2x00_async_iocb_timeout;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

    pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
    if (pd == NULL) {
        ql_log(ql_log_warn, vha, 0xd043,
            "Failed to allocate port database structure.\n");
        goto done_free_sp;
    }

    mb = sp->u.iocb_cmd.u.mbx.out_mb;
    mb[0] = MBC_GET_PORT_DATABASE;
    mb[1] = fcport->loop_id;
    mb[2] = MSW(pd_dma);
    mb[3] = LSW(pd_dma);
    mb[6] = MSW(MSD(pd_dma));
    mb[7] = LSW(MSD(pd_dma));
    mb[9] = vha->vp_idx;
    mb[10] = opt;

    mbx->u.mbx.in = (void *)pd;
    mbx->u.mbx.in_dma = pd_dma;

    sp->done = qla24xx_async_gpdb_sp_done;

    ql_dbg(ql_dbg_disc, vha, 0x20dc,
        "Async-%s %8phC hndl %x opt %x\n",
        sp->name, fcport->port_name, sp->handle, opt);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;
    return rval;

done_free_sp:
    if (pd)
        dma_pool_free(ha->s_dma_pool, pd, pd_dma);

    sp->free(sp);
    fcport->flags &= ~FCF_ASYNC_SENT;
done:
    qla24xx_post_gpdb_work(vha, fcport, opt);
    return rval;
}

static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
    unsigned long flags;

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    ea->fcport->login_gen++;
    ea->fcport->deleted = 0;
    ea->fcport->logout_on_delete = 1;

    if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
        vha->fcport_count++;
        ea->fcport->login_succ = 1;

        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        qla24xx_sched_upd_fcport(ea->fcport);
        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    } else if (ea->fcport->login_succ) {
        /*
         * We have an existing session. A late RSCN delivery
         * must have triggered the session to be re-validate.
         * Session is still valid.
         */
        ql_dbg(ql_dbg_disc, vha, 0x20d6,
            "%s %d %8phC session revalidate success\n",
            __func__, __LINE__, ea->fcport->port_name);
        ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
    }
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
    fc_port_t *fcport = ea->fcport;
    struct port_database_24xx *pd;
    struct srb *sp = ea->sp;
    uint8_t ls;

    pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

    fcport->flags &= ~FCF_ASYNC_SENT;

    ql_dbg(ql_dbg_disc, vha, 0x20d2,
        "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
        fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
        ea->rc);

    if (fcport->disc_state == DSC_DELETE_PEND)
        return;

    if (fcport->fc4f_nvme)
        ls = pd->current_login_state >> 4;
    else
        ls = pd->current_login_state & 0xf;

    if (ea->sp->gen2 != fcport->login_gen) {
        /* target side must have changed it. */

        ql_dbg(ql_dbg_disc, vha, 0x20d3,
            "%s %8phC generation changed\n",
            __func__, fcport->port_name);
        return;
    } else if (ea->sp->gen1 != fcport->rscn_gen) {
        qla_rscn_replay(fcport);
        qlt_schedule_sess_for_deletion(fcport);
        return;
    }

    switch (ls) {
    case PDS_PRLI_COMPLETE:
        __qla24xx_parse_gpdb(vha, fcport, pd);
        break;
    case PDS_PLOGI_PENDING:
    case PDS_PLOGI_COMPLETE:
    case PDS_PRLI_PENDING:
    case PDS_PRLI2_PENDING:
        /* Set discovery state back to GNL to Relogin attempt */
        if (qla_dual_mode_enabled(vha) ||
            qla_ini_mode_enabled(vha)) {
            fcport->disc_state = DSC_GNL;
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        }
        return;
    case PDS_LOGO_PENDING:
    case PDS_PORT_UNAVAILABLE:
    default:
        ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
            __func__, __LINE__, fcport->port_name);
        qlt_schedule_sess_for_deletion(fcport);
        return;
    }
    __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    u8 login = 0;
    int rc;

    if (qla_tgt_mode_enabled(vha))
        return;

    if (qla_dual_mode_enabled(vha)) {
        if (N2N_TOPO(vha->hw)) {
            u64 mywwn, wwn;

            mywwn = wwn_to_u64(vha->port_name);
            wwn = wwn_to_u64(fcport->port_name);
            if (mywwn > wwn)
                login = 1;
            else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
                && time_after_eq(jiffies,
                    fcport->plogi_nack_done_deadline))
                login = 1;
        } else {
            login = 1;
        }
    } else {
        /* initiator mode */
        login = 1;
    }

    if (login && fcport->login_retry) {
        fcport->login_retry--;
        if (fcport->loop_id == FC_NO_LOOP_ID) {
            fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
            rc = qla2x00_find_new_loop_id(vha, fcport);
            if (rc) {
                ql_dbg(ql_dbg_disc, vha, 0x20e6,
                    "%s %d %8phC post del sess - out of loopid\n",
                    __func__, __LINE__, fcport->port_name);
                fcport->scan_state = 0;
                qlt_schedule_sess_for_deletion(fcport);
                return;
            }
        }
        ql_dbg(ql_dbg_disc, vha, 0x20bf,
            "%s %d %8phC post login\n",
            __func__, __LINE__, fcport->port_name);
        qla2x00_post_async_login_work(vha, fcport, NULL);
    }
}

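/*
 * Main discovery state machine step for an fcport: based on disc_state and
 * topology, schedule GNNID/GNL/GPDB/PRLI/ELS-PLOGI work or ask the DPC
 * thread to retry the login later.
 */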
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    u16 data[2];
    u64 wwn;
    u16 sec;

    ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
        "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
        __func__, fcport->port_name, fcport->disc_state,
        fcport->fw_login_state, fcport->login_pause, fcport->flags,
        fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
        fcport->login_gen, fcport->loop_id, fcport->scan_state);

    if (fcport->scan_state != QLA_FCPORT_FOUND)
        return 0;

    if ((fcport->loop_id != FC_NO_LOOP_ID) &&
        ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
        (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
        return 0;

    if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
        if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
            return 0;
        }
    }

    /* for pure Target Mode. Login will not be initiated */
    if (vha->host->active_mode == MODE_TARGET)
        return 0;

    if (fcport->flags & FCF_ASYNC_SENT) {
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        return 0;
    }

    switch (fcport->disc_state) {
    case DSC_DELETED:
        wwn = wwn_to_u64(fcport->node_name);
        switch (vha->hw->current_topology) {
        case ISP_CFG_N:
            if (fcport_is_smaller(fcport)) {
                /* this adapter is bigger */
                if (fcport->login_retry) {
                    if (fcport->loop_id == FC_NO_LOOP_ID) {
                        qla2x00_find_new_loop_id(vha,
                            fcport);
                        fcport->fw_login_state =
                            DSC_LS_PORT_UNAVAIL;
                    }
                    fcport->login_retry--;
                    qla_post_els_plogi_work(vha, fcport);
                } else {
                    ql_log(ql_log_info, vha, 0x705d,
                        "Unable to reach remote port %8phC",
                        fcport->port_name);
                }
            } else {
                qla24xx_post_gnl_work(vha, fcport);
            }
            break;
        default:
            if (wwn == 0) {
                ql_dbg(ql_dbg_disc, vha, 0xffff,
                    "%s %d %8phC post GNNID\n",
                    __func__, __LINE__, fcport->port_name);
                qla24xx_post_gnnid_work(vha, fcport);
            } else if (fcport->loop_id == FC_NO_LOOP_ID) {
                ql_dbg(ql_dbg_disc, vha, 0x20bd,
                    "%s %d %8phC post gnl\n",
                    __func__, __LINE__, fcport->port_name);
                qla24xx_post_gnl_work(vha, fcport);
            } else {
                qla_chk_n2n_b4_login(vha, fcport);
            }
            break;
        }
        break;

    case DSC_GNL:
        switch (vha->hw->current_topology) {
        case ISP_CFG_N:
            if ((fcport->current_login_state & 0xf) == 0x6) {
                ql_dbg(ql_dbg_disc, vha, 0x2118,
                    "%s %d %8phC post GPDB work\n",
                    __func__, __LINE__, fcport->port_name);
                fcport->chip_reset =
                    vha->hw->base_qpair->chip_reset;
                qla24xx_post_gpdb_work(vha, fcport, 0);
            } else {
                ql_dbg(ql_dbg_disc, vha, 0x2118,
                    "%s %d %8phC post NVMe PRLI\n",
                    __func__, __LINE__, fcport->port_name);
                qla24xx_post_prli_work(vha, fcport);
            }
            break;
        default:
            if (fcport->login_pause) {
                fcport->last_rscn_gen = fcport->rscn_gen;
                fcport->last_login_gen = fcport->login_gen;
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                break;
            }
            qla_chk_n2n_b4_login(vha, fcport);
            break;
        }
        break;

    case DSC_LOGIN_FAILED:
        if (N2N_TOPO(vha->hw))
            qla_chk_n2n_b4_login(vha, fcport);
        else
            qlt_schedule_sess_for_deletion(fcport);
        break;

    case DSC_LOGIN_COMPLETE:
        /* recheck login state */
        data[0] = data[1] = 0;
        qla2x00_post_async_adisc_work(vha, fcport, data);
        break;

    case DSC_LOGIN_PEND:
        if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
            qla24xx_post_prli_work(vha, fcport);
        break;

    case DSC_UPD_FCPORT:
        sec = jiffies_to_msecs(jiffies -
            fcport->jiffies_at_registration)/1000;
        if (fcport->sec_since_registration < sec && sec &&
            !(sec % 60)) {
            fcport->sec_since_registration = sec;
            ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
                "%s %8phC - Slow Rport registration(%d Sec)\n",
                __func__, fcport->port_name, sec);
        }

        if (fcport->next_disc_state != DSC_DELETE_PEND)
            fcport->next_disc_state = DSC_ADISC;
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        break;

    default:
        break;
    }

    return 0;
}

int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
    struct qla_work_evt *e;

    e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
    if (!e)
        return QLA_FUNCTION_FAILED;

    e->u.new_sess.id = *id;
    e->u.new_sess.pla = pla;
    e->u.new_sess.fc4_type = fc4_type;
    memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
    if (node_name)
        memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

    return qla2x00_post_work(vha, e);
}

static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
    struct event_arg *ea)
{
    fc_port_t *fcport = ea->fcport;

    ql_dbg(ql_dbg_disc, vha, 0x2102,
        "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
        __func__, fcport->port_name, fcport->disc_state,
        fcport->fw_login_state, fcport->login_pause,
        fcport->deleted, fcport->conflict,
        fcport->last_rscn_gen, fcport->rscn_gen,
        fcport->last_login_gen, fcport->login_gen,
        fcport->flags);

    if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
        (fcport->fw_login_state == DSC_LS_PRLI_PEND))
        return;

    if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
        if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
            return;
        }
    }

    if (fcport->last_rscn_gen != fcport->rscn_gen) {
        ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
            __func__, __LINE__, fcport->port_name);

        return;
    }

    qla24xx_fcport_handle_login(vha, fcport);
}

static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
    struct event_arg *ea)
{
    ql_dbg(ql_dbg_disc, vha, 0x2118,
        "%s %d %8phC post PRLI\n",
        __func__, __LINE__, ea->fcport->port_name);
    qla24xx_post_prli_work(vha, ea->fcport);
}

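/*
 * Central dispatcher for discovery events (FCME_*); fans out to the
 * individual completion handlers above.
 */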
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
    fc_port_t *fcport;

    switch (ea->event) {
    case FCME_RELOGIN:
        if (test_bit(UNLOADING, &vha->dpc_flags))
            return;

        qla24xx_handle_relogin_event(vha, ea);
        break;
    case FCME_RSCN:
        if (test_bit(UNLOADING, &vha->dpc_flags))
            return;
        {
            unsigned long flags;

            fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
            if (fcport) {
                fcport->scan_needed = 1;
                fcport->rscn_gen++;
            }

            spin_lock_irqsave(&vha->work_lock, flags);
            if (vha->scan.scan_flags == 0) {
                ql_dbg(ql_dbg_disc, vha, 0xffff,
                    "%s: schedule\n", __func__);
                vha->scan.scan_flags |= SF_QUEUED;
                schedule_delayed_work(&vha->scan.scan_work, 5);
            }
            spin_unlock_irqrestore(&vha->work_lock, flags);
        }
        break;
    case FCME_GNL_DONE:
        qla24xx_handle_gnl_done_event(vha, ea);
        break;
    case FCME_GPSC_DONE:
        qla24xx_handle_gpsc_event(vha, ea);
        break;
    case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
        qla24xx_handle_plogi_done_event(vha, ea);
        break;
    case FCME_PRLI_DONE:
        qla24xx_handle_prli_done_event(vha, ea);
        break;
    case FCME_GPDB_DONE:
        qla24xx_handle_gpdb_event(vha, ea);
        break;
    case FCME_GPNID_DONE:
        qla24xx_handle_gpnid_event(vha, ea);
        break;
    case FCME_GFFID_DONE:
        qla24xx_handle_gffid_event(vha, ea);
        break;
    case FCME_ADISC_DONE:
        qla24xx_handle_adisc_event(vha, ea);
        break;
    case FCME_GNNID_DONE:
        qla24xx_handle_gnnid_event(vha, ea);
        break;
    case FCME_GFPNID_DONE:
        qla24xx_handle_gfpnid_event(vha, ea);
        break;
    case FCME_ELS_PLOGI_DONE:
        qla_handle_els_plogi_done(vha, ea);
        break;
    default:
        BUG_ON(1);
        break;
    }
}

/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
    struct event_arg ea;

    switch (fcport->disc_state) {
    case DSC_DELETE_PEND:
        return;
    default:
        break;
    }

    if (fcport->scan_needed) {
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_RSCN;
        ea.id = fcport->d_id;
        ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
        qla2x00_fcport_event_handler(fcport->vha, &ea);
    }
}

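/*
 * Task management support: send a TM IOCB and wait for its completion;
 * a marker IOCB is then issued unless the driver is unloading.
 */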
static void
qla2x00_tmf_iocb_timeout(void *data)
{
    srb_t *sp = data;
    struct srb_iocb *tmf = &sp->u.iocb_cmd;

    tmf->u.tmf.comp_status = CS_TIMEOUT;
    complete(&tmf->u.tmf.comp);
}

static void
qla2x00_tmf_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct srb_iocb *tmf = &sp->u.iocb_cmd;

    complete(&tmf->u.tmf.comp);
}

int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
    uint32_t tag)
{
    struct scsi_qla_host *vha = fcport->vha;
    struct srb_iocb *tm_iocb;
    srb_t *sp;
    int rval = QLA_FUNCTION_FAILED;

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    tm_iocb = &sp->u.iocb_cmd;
    sp->type = SRB_TM_CMD;
    sp->name = "tmf";

    tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
    init_completion(&tm_iocb->u.tmf.comp);
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

    tm_iocb->u.tmf.flags = flags;
    tm_iocb->u.tmf.lun = lun;
    tm_iocb->u.tmf.data = tag;
    sp->done = qla2x00_tmf_sp_done;

    ql_dbg(ql_dbg_taskm, vha, 0x802f,
        "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
        sp->handle, fcport->loop_id, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;
    wait_for_completion(&tm_iocb->u.tmf.comp);

    rval = tm_iocb->u.tmf.data;

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x8030,
            "TM IOCB failed (%x).\n", rval);
    }

    if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
        flags = tm_iocb->u.tmf.flags;
        lun = (uint16_t)tm_iocb->u.tmf.lun;

        /* Issue Marker IOCB */
        qla2x00_marker(vha, vha->hw->req_q_map[0],
            vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
            flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
    }

done_free_sp:
    sp->free(sp);
    sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
    return rval;
}

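/*
 * Abort handling: build an SRB_ABT_CMD IOCB for an outstanding command on
 * the same queue pair and optionally wait for the abort to complete.
 */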
static void
qla24xx_abort_iocb_timeout(void *data)
{
    srb_t *sp = data;
    struct srb_iocb *abt = &sp->u.iocb_cmd;

    abt->u.abt.comp_status = CS_TIMEOUT;
    sp->done(sp, QLA_FUNCTION_TIMEOUT);
}

static void
qla24xx_abort_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct srb_iocb *abt = &sp->u.iocb_cmd;

    if (del_timer(&sp->u.iocb_cmd.timer)) {
        if (sp->flags & SRB_WAKEUP_ON_COMP)
            complete(&abt->u.abt.comp);
        else
            sp->free(sp);
    }
}

int
qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
    scsi_qla_host_t *vha = cmd_sp->vha;
    struct srb_iocb *abt_iocb;
    srb_t *sp;
    int rval = QLA_FUNCTION_FAILED;

    sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
        GFP_ATOMIC);
    if (!sp)
        goto done;

    abt_iocb = &sp->u.iocb_cmd;
    sp->type = SRB_ABT_CMD;
    sp->name = "abort";
    sp->qpair = cmd_sp->qpair;
    if (wait)
        sp->flags = SRB_WAKEUP_ON_COMP;

    abt_iocb->timeout = qla24xx_abort_iocb_timeout;
    init_completion(&abt_iocb->u.abt.comp);
    /* FW can send 2 x ABTS's timeout/20s */
    qla2x00_init_timer(sp, 42);

    abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
    abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

    sp->done = qla24xx_abort_sp_done;

    ql_dbg(ql_dbg_async, vha, 0x507c,
        "Abort command issued - hdl=%x, type=%x\n",
        cmd_sp->handle, cmd_sp->type);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;

    if (wait) {
        wait_for_completion(&abt_iocb->u.abt.comp);
        rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
            QLA_SUCCESS : QLA_FUNCTION_FAILED;
    } else {
        goto done;
    }

done_free_sp:
    sp->free(sp);
done:
    return rval;
}

int
qla24xx_async_abort_command(srb_t *sp)
{
    unsigned long flags = 0;

    uint32_t handle;
    fc_port_t *fcport = sp->fcport;
    struct qla_qpair *qpair = sp->qpair;
    struct scsi_qla_host *vha = fcport->vha;
    struct req_que *req = qpair->req;

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
        if (req->outstanding_cmds[handle] == sp)
            break;
    }
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    if (handle == req->num_outstanding_cmds) {
        /* Command not found. */
        return QLA_FUNCTION_FAILED;
    }
    if (sp->type == SRB_FXIOCB_DCMD)
        return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
            FXDISC_ABORT_IOCTL);

    return qla24xx_async_abort_cmd(sp, true);
}

a5d42f4c
DG
1818static void
1819qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1820{
1821 switch (ea->data[0]) {
1822 case MBS_COMMAND_COMPLETE:
1823 ql_dbg(ql_dbg_disc, vha, 0x2118,
1824 "%s %d %8phC post gpdb\n",
1825 __func__, __LINE__, ea->fcport->port_name);
1826
1827 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1828 ea->fcport->logout_on_delete = 1;
1829 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1830 break;
1831 default:
1cbc0efc
DT
1832 if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
1833 (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */
1834 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1835 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
1836 break;
1837 }
1838
edd05de1
DG
1839 if (ea->fcport->n2n_flag) {
1840 ql_dbg(ql_dbg_disc, vha, 0x2118,
1841 "%s %d %8phC post fc4 prli\n",
1842 __func__, __LINE__, ea->fcport->port_name);
1843 ea->fcport->fc4f_nvme = 0;
1844 ea->fcport->n2n_flag = 0;
1845 qla24xx_post_prli_work(vha, ea->fcport);
1846 }
a5d42f4c
DG
1847 ql_dbg(ql_dbg_disc, vha, 0x2119,
1848 "%s %d %8phC unhandle event of %x\n",
1849 __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
1850 break;
1851 }
1852}
1853
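/**
 * qla24xx_handle_plogi_done_event() - Process a completed PLOGI.
 * @vha: HA context
 * @ea: event argument carrying the fcport and IOCB status
 *
 * Validates login and RSCN generations, then drives the login state
 * machine: post a PRLI or GPDB on success, schedule a relogin on error,
 * and resolve loop ID / N_Port ID conflicts reported by the firmware.
 */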
726b8548
QT
1854static void
1855qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ac280b67 1856{
726b8548 1857 port_id_t cid; /* conflict Nport id */
a084fd68
QT
1858 u16 lid;
1859 struct fc_port *conflict_fcport;
82abdcaf 1860 unsigned long flags;
a4239945
QT
1861 struct fc_port *fcport = ea->fcport;
1862
f352eeb7
QT
1863 ql_dbg(ql_dbg_disc, vha, 0xffff,
1864 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
1865 __func__, fcport->port_name, fcport->disc_state,
1866 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
861d483d 1867 ea->sp->gen1, fcport->rscn_gen,
f352eeb7
QT
1868 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
1869
a4239945
QT
1870 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1871 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
1872 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1873 "%s %d %8phC Remote is trying to login\n",
1874 __func__, __LINE__, fcport->port_name);
1875 return;
1876 }
1877
1878 if (fcport->disc_state == DSC_DELETE_PEND)
1879 return;
1880
1881 if (ea->sp->gen2 != fcport->login_gen) {
1882 /* target side must have changed it. */
1883 ql_dbg(ql_dbg_disc, vha, 0x20d3,
f352eeb7
QT
1884 "%s %8phC generation changed\n",
1885 __func__, fcport->port_name);
1886 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
a4239945
QT
1887 return;
1888 } else if (ea->sp->gen1 != fcport->rscn_gen) {
861d483d
QT
1889 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1890 "%s %8phC RSCN generation changed\n",
1891 __func__, fcport->port_name);
d4f7a16a 1892 qla_rscn_replay(fcport);
861d483d 1893 qlt_schedule_sess_for_deletion(fcport);
a4239945
QT
1894 return;
1895 }
ac280b67 1896
726b8548 1897 switch (ea->data[0]) {
ac280b67 1898 case MBS_COMMAND_COMPLETE:
a4f92a32
AV
1899 /*
1900 * Driver must validate login state - If PRLI not complete,
1901 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
1902 * requests.
1903 */
a5d42f4c
DG
1904 if (ea->fcport->fc4f_nvme) {
1905 ql_dbg(ql_dbg_disc, vha, 0x2117,
1906 "%s %d %8phC post prli\n",
1907 __func__, __LINE__, ea->fcport->port_name);
1908 qla24xx_post_prli_work(vha, ea->fcport);
1909 } else {
1910 ql_dbg(ql_dbg_disc, vha, 0x20ea,
a084fd68
QT
1911 "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
1912 __func__, __LINE__, ea->fcport->port_name,
1913 ea->fcport->loop_id, ea->fcport->d_id.b24);
1914
1915 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
82abdcaf 1916 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
a5d42f4c
DG
1917 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1918 ea->fcport->logout_on_delete = 1;
3515832c 1919 ea->fcport->send_els_logo = 0;
82abdcaf
QT
1920 ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
1921 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1922
a5d42f4c
DG
1923 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1924 }
ac280b67
AV
1925 break;
1926 case MBS_COMMAND_ERROR:
83548fe2 1927 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
726b8548
QT
1928 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
1929
1930 ea->fcport->flags &= ~FCF_ASYNC_SENT;
1931 ea->fcport->disc_state = DSC_LOGIN_FAILED;
1932 if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
ac280b67
AV
1933 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1934 else
726b8548 1935 qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
ac280b67
AV
1936 break;
1937 case MBS_LOOP_ID_USED:
726b8548
QT
1938 /* data[1] = IO PARAM 1 = nport ID */
1939 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
1940 cid.b.area = (ea->iop[1] >> 8) & 0xff;
1941 cid.b.al_pa = ea->iop[1] & 0xff;
1942 cid.b.rsvd_1 = 0;
1943
83548fe2 1944 ql_dbg(ql_dbg_disc, vha, 0x20ec,
5c640053 1945 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
83548fe2 1946 __func__, __LINE__, ea->fcport->port_name,
5c640053 1947 ea->fcport->loop_id, cid.b24);
726b8548 1948
5c640053
QT
1949 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1950 ea->fcport->loop_id = FC_NO_LOOP_ID;
726b8548
QT
1951 qla24xx_post_gnl_work(vha, ea->fcport);
1952 break;
1953 case MBS_PORT_ID_USED:
a084fd68
QT
1954 lid = ea->iop[1] & 0xffff;
1955 qlt_find_sess_invalidate_other(vha,
1956 wwn_to_u64(ea->fcport->port_name),
1957 ea->fcport->d_id, lid, &conflict_fcport);
1958
1959 if (conflict_fcport) {
1960 /*
 1961			 * Another fcport shares the same loop_id/nport id.
1962 * Conflict fcport needs to finish cleanup before this
1963 * fcport can proceed to login.
1964 */
1965 conflict_fcport->conflict = ea->fcport;
1966 ea->fcport->login_pause = 1;
1967
1968 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1969 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
1970 __func__, __LINE__, ea->fcport->port_name,
1971 ea->fcport->d_id.b24, lid);
a084fd68
QT
1972 } else {
1973 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1974 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
1975 __func__, __LINE__, ea->fcport->port_name,
1976 ea->fcport->d_id.b24, lid);
1977
1978 qla2x00_clear_loop_id(ea->fcport);
1979 set_bit(lid, vha->hw->loop_id_map);
1980 ea->fcport->loop_id = lid;
1981 ea->fcport->keep_nport_handle = 0;
94cff6e1 1982 qlt_schedule_sess_for_deletion(ea->fcport);
a084fd68 1983 }
ac280b67
AV
1984 break;
1985 }
4916392b 1986 return;
ac280b67
AV
1987}
1988
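/**
 * qla2x00_async_logout_done() - Completion handler for an async LOGO.
 * @vha: HA context
 * @fcport: port that was logged out
 * @data: mailbox completion status
 *
 * Forwards the completion to the target-mode LOGO handler and bumps the
 * login generation so stale login work is discarded.
 */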
4916392b 1989void
ac280b67
AV
1990qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1991 uint16_t *data)
1992{
a6ca8878 1993 qlt_logo_completion_handler(fcport, data[0]);
726b8548 1994 fcport->login_gen++;
fa83e658 1995 fcport->flags &= ~FCF_ASYNC_ACTIVE;
4916392b 1996 return;
ac280b67
AV
1997}
1998
1da177e4
LT
1999/****************************************************************************/
2000/* QLogic ISP2x00 Hardware Support Functions. */
2001/****************************************************************************/
2002
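/**
 * qla83xx_nic_core_fw_load() - Coordinate NIC core firmware load via IDC.
 * @vha: HA context
 *
 * Takes the inter-driver coordination lock, advertises this function's
 * presence, negotiates the IDC major/minor versions and, when this
 * function owns the reset, marks the device ready before running the IDC
 * state handler.
 *
 * Returns 0 on success.
 */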
fa492630 2003static int
7d613ac6
SV
2004qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2005{
2006 int rval = QLA_SUCCESS;
2007 struct qla_hw_data *ha = vha->hw;
2008 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 2009 uint16_t config[4];
7d613ac6
SV
2010
2011 qla83xx_idc_lock(vha, 0);
2012
2013 /* SV: TODO: Assign initialization timeout from
2014 * flash-info / other param
2015 */
2016 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2017 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2018
2019 /* Set our fcoe function presence */
2020 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2021 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2022 "Error while setting DRV-Presence.\n");
2023 rval = QLA_FUNCTION_FAILED;
2024 goto exit;
2025 }
2026
2027 /* Decide the reset ownership */
2028 qla83xx_reset_ownership(vha);
2029
2030 /*
2031 * On first protocol driver load:
2032 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2033 * register.
2034 * Others: Check compatibility with current IDC Major version.
2035 */
2036 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2037 if (ha->flags.nic_core_reset_owner) {
2038 /* Set IDC Major version */
2039 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2040 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2041
2042 /* Clearing IDC-Lock-Recovery register */
2043 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2044 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2045 /*
2046 * Clear further IDC participation if we are not compatible with
2047 * the current IDC Major Version.
2048 */
2049 ql_log(ql_log_warn, vha, 0xb07d,
2050 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2051 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2052 __qla83xx_clear_drv_presence(vha);
2053 rval = QLA_FUNCTION_FAILED;
2054 goto exit;
2055 }
2056 /* Each function sets its supported Minor version. */
2057 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2058 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2059 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2060
711aa7f7
SK
2061 if (ha->flags.nic_core_reset_owner) {
2062 memset(config, 0, sizeof(config));
2063 if (!qla81xx_get_port_config(vha, config))
2064 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2065 QLA8XXX_DEV_READY);
2066 }
2067
7d613ac6
SV
2068 rval = qla83xx_idc_state_handler(vha);
2069
2070exit:
2071 qla83xx_idc_unlock(vha, 0);
2072
2073 return rval;
2074}
2075
1da177e4
LT
2076/*
2077* qla2x00_initialize_adapter
2078* Initialize board.
2079*
2080* Input:
2081* ha = adapter block pointer.
2082*
2083* Returns:
2084* 0 = success
2085*/
2086int
e315cd28 2087qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
2088{
2089 int rval;
e315cd28 2090 struct qla_hw_data *ha = vha->hw;
73208dfd 2091 struct req_que *req = ha->req_q_map[0];
2533cf67 2092
fc90adaf
JC
2093 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2094 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2095
1da177e4 2096 /* Clear adapter flags. */
e315cd28 2097 vha->flags.online = 0;
2533cf67 2098 ha->flags.chip_reset_done = 0;
e315cd28 2099 vha->flags.reset_active = 0;
85880801
AV
2100 ha->flags.pci_channel_io_perm_failure = 0;
2101 ha->flags.eeh_busy = 0;
fabbb8df 2102 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
e315cd28
AC
2103 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2104 atomic_set(&vha->loop_state, LOOP_DOWN);
2105 vha->device_flags = DFLG_NO_CABLE;
2106 vha->dpc_flags = 0;
2107 vha->flags.management_server_logged_in = 0;
2108 vha->marker_needed = 0;
1da177e4
LT
2109 ha->isp_abort_cnt = 0;
2110 ha->beacon_blink_led = 0;
2111
73208dfd
AC
2112 set_bit(0, ha->req_qid_map);
2113 set_bit(0, ha->rsp_qid_map);
2114
cfb0919c 2115 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 2116 "Configuring PCI space...\n");
e315cd28 2117 rval = ha->isp_ops->pci_config(vha);
1da177e4 2118 if (rval) {
7c3df132
SK
2119 ql_log(ql_log_warn, vha, 0x0044,
2120 "Unable to configure PCI space.\n");
1da177e4
LT
2121 return (rval);
2122 }
2123
e315cd28 2124 ha->isp_ops->reset_chip(vha);
1da177e4 2125
e315cd28 2126 rval = qla2xxx_get_flash_info(vha);
c00d8994 2127 if (rval) {
7c3df132
SK
2128 ql_log(ql_log_fatal, vha, 0x004f,
2129 "Unable to validate FLASH data.\n");
7ec0effd
AD
2130 return rval;
2131 }
2132
2133 if (IS_QLA8044(ha)) {
2134 qla8044_read_reset_template(vha);
2135
2136 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
 2137		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
 2138		 * to NEED_RESET. But if NEED_RESET is set, drivers
 2139		 * should honor the reset. */
2140 if (ql2xdontresethba == 1)
2141 qla8044_set_idc_dontreset(vha);
c00d8994
AV
2142 }
2143
73208dfd 2144 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 2145 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 2146 "Configure NVRAM parameters...\n");
0107109e 2147
e315cd28 2148 ha->isp_ops->nvram_config(vha);
1da177e4 2149
d4c760c2
AV
2150 if (ha->flags.disable_serdes) {
2151 /* Mask HBA via NVRAM settings? */
7c3df132 2152 ql_log(ql_log_info, vha, 0x0077,
7b833558 2153 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
d4c760c2
AV
2154 return QLA_FUNCTION_FAILED;
2155 }
2156
cfb0919c 2157 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 2158 "Verifying loaded RISC code...\n");
1da177e4 2159
e315cd28
AC
2160 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2161 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
2162 if (rval)
2163 return (rval);
e315cd28 2164 rval = qla2x00_setup_chip(vha);
d19044c3
AV
2165 if (rval)
2166 return (rval);
1da177e4 2167 }
a9083016 2168
4d4df193 2169 if (IS_QLA84XX(ha)) {
e315cd28 2170 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 2171 if (!ha->cs84xx) {
7c3df132 2172 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
2173 "Unable to configure ISP84XX.\n");
2174 return QLA_FUNCTION_FAILED;
2175 }
2176 }
2d70c103 2177
ead03855 2178 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2d70c103
NB
2179 rval = qla2x00_init_rings(vha);
2180
2533cf67 2181 ha->flags.chip_reset_done = 1;
1da177e4 2182
9a069e19 2183 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 2184 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
2185 rval = qla84xx_init_chip(vha);
2186 if (rval != QLA_SUCCESS) {
7c3df132
SK
2187 ql_log(ql_log_warn, vha, 0x00d4,
2188 "Unable to initialize ISP84XX.\n");
8d2b21db 2189 qla84xx_put_chip(vha);
9a069e19
GM
2190 }
2191 }
2192
7d613ac6
SV
2193 /* Load the NIC Core f/w if we are the first protocol driver. */
2194 if (IS_QLA8031(ha)) {
2195 rval = qla83xx_nic_core_fw_load(vha);
2196 if (rval)
2197 ql_log(ql_log_warn, vha, 0x0124,
2198 "Error in initializing NIC Core f/w.\n");
2199 }
2200
2f0f3f4f
MI
2201 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2202 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 2203
c46e65c7
JC
2204 if (IS_P3P_TYPE(ha))
2205 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2206 else
2207 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2208
1da177e4
LT
2209 return (rval);
2210}
2211
2212/**
abbd8870 2213 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2db6228d 2214 * @vha: HA context
1da177e4
LT
2215 *
2216 * Returns 0 on success.
2217 */
abbd8870 2218int
e315cd28 2219qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 2220{
a157b101 2221 uint16_t w;
abbd8870 2222 unsigned long flags;
e315cd28 2223 struct qla_hw_data *ha = vha->hw;
3d71644c 2224 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2225
1da177e4 2226 pci_set_master(ha->pdev);
af6177d8 2227 pci_try_set_mwi(ha->pdev);
1da177e4 2228
1da177e4 2229 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2230 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
2231 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2232
737faece 2233 pci_disable_rom(ha->pdev);
1da177e4
LT
2234
2235 /* Get PCI bus information. */
2236 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 2237 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
2238 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2239
abbd8870
AV
2240 return QLA_SUCCESS;
2241}
1da177e4 2242
abbd8870
AV
2243/**
2244 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2db6228d 2245 * @vha: HA context
abbd8870
AV
2246 *
2247 * Returns 0 on success.
2248 */
2249int
e315cd28 2250qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 2251{
a157b101 2252 uint16_t w;
abbd8870
AV
2253 unsigned long flags = 0;
2254 uint32_t cnt;
e315cd28 2255 struct qla_hw_data *ha = vha->hw;
3d71644c 2256 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2257
abbd8870 2258 pci_set_master(ha->pdev);
af6177d8 2259 pci_try_set_mwi(ha->pdev);
1da177e4 2260
abbd8870 2261 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2262 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 2263
abbd8870
AV
2264 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2265 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 2266 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 2267
abbd8870
AV
2268 /*
2269 * If this is a 2300 card and not 2312, reset the
2270 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2271 * the 2310 also reports itself as a 2300 so we need to get the
2272 * fb revision level -- a 6 indicates it really is a 2300 and
2273 * not a 2310.
2274 */
2275 if (IS_QLA2300(ha)) {
2276 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 2277
abbd8870 2278 /* Pause RISC. */
3d71644c 2279 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 2280 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 2281 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 2282 break;
1da177e4 2283
abbd8870
AV
2284 udelay(10);
2285 }
1da177e4 2286
abbd8870 2287 /* Select FPM registers. */
3d71644c
AV
2288 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2289 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2290
2291 /* Get the fb rev level */
3d71644c 2292 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
2293
2294 if (ha->fb_rev == FPM_2300)
a157b101 2295 pci_clear_mwi(ha->pdev);
abbd8870
AV
2296
2297 /* Deselect FPM registers. */
3d71644c
AV
2298 WRT_REG_WORD(&reg->ctrl_status, 0x0);
2299 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2300
2301 /* Release RISC module. */
3d71644c 2302 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 2303 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 2304 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
2305 break;
2306
2307 udelay(10);
1da177e4 2308 }
1da177e4 2309
abbd8870
AV
2310 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2311 }
1da177e4 2312
abbd8870
AV
2313 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2314
737faece 2315 pci_disable_rom(ha->pdev);
1da177e4 2316
abbd8870
AV
2317 /* Get PCI bus information. */
2318 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 2319 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2320 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2321
2322 return QLA_SUCCESS;
1da177e4
LT
2323}
2324
0107109e
AV
2325/**
2326 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2db6228d 2327 * @vha: HA context
0107109e
AV
2328 *
2329 * Returns 0 on success.
2330 */
2331int
e315cd28 2332qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 2333{
a157b101 2334 uint16_t w;
0107109e 2335 unsigned long flags = 0;
e315cd28 2336 struct qla_hw_data *ha = vha->hw;
0107109e 2337 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
2338
2339 pci_set_master(ha->pdev);
af6177d8 2340 pci_try_set_mwi(ha->pdev);
0107109e
AV
2341
2342 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2343 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
2344 w &= ~PCI_COMMAND_INTX_DISABLE;
2345 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2346
2347 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2348
2349 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
2350 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2351 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
2352
 2353	/* PCIe -- adjust Maximum Read Request Size (4096). */
e67f1321 2354 if (pci_is_pcie(ha->pdev))
5ffd3a52 2355 pcie_set_readrq(ha->pdev, 4096);
0107109e 2356
737faece 2357 pci_disable_rom(ha->pdev);
0107109e 2358
44c10138 2359 ha->chip_revision = ha->pdev->revision;
a8488abe 2360
0107109e
AV
2361 /* Get PCI bus information. */
2362 spin_lock_irqsave(&ha->hardware_lock, flags);
2363 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
2364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2365
2366 return QLA_SUCCESS;
2367}
2368
c3a2f0df
AV
2369/**
2370 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2db6228d 2371 * @vha: HA context
c3a2f0df
AV
2372 *
2373 * Returns 0 on success.
2374 */
2375int
e315cd28 2376qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
2377{
2378 uint16_t w;
e315cd28 2379 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
2380
2381 pci_set_master(ha->pdev);
2382 pci_try_set_mwi(ha->pdev);
2383
2384 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2385 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2386 w &= ~PCI_COMMAND_INTX_DISABLE;
2387 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2388
 2389	/* PCIe -- adjust Maximum Read Request Size (4096). */
e67f1321 2390 if (pci_is_pcie(ha->pdev))
5ffd3a52 2391 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 2392
737faece 2393 pci_disable_rom(ha->pdev);
c3a2f0df
AV
2394
2395 ha->chip_revision = ha->pdev->revision;
2396
2397 return QLA_SUCCESS;
2398}
2399
1da177e4
LT
2400/**
2401 * qla2x00_isp_firmware() - Choose firmware image.
2db6228d 2402 * @vha: HA context
1da177e4
LT
2403 *
2404 * Returns 0 on success.
2405 */
2406static int
e315cd28 2407qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
2408{
2409 int rval;
42e421b1
AV
2410 uint16_t loop_id, topo, sw_cap;
2411 uint8_t domain, area, al_pa;
e315cd28 2412 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2413
2414 /* Assume loading risc code */
fa2a1ce5 2415 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
2416
2417 if (ha->flags.disable_risc_code_load) {
7c3df132 2418 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
2419
2420 /* Verify checksum of loaded RISC code. */
e315cd28 2421 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
2422 if (rval == QLA_SUCCESS) {
2423 /* And, verify we are not in ROM code. */
e315cd28 2424 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
2425 &area, &domain, &topo, &sw_cap);
2426 }
1da177e4
LT
2427 }
2428
7c3df132
SK
2429 if (rval)
2430 ql_dbg(ql_dbg_init, vha, 0x007a,
2431 "**** Load RISC code ****.\n");
1da177e4
LT
2432
2433 return (rval);
2434}
2435
2436/**
2437 * qla2x00_reset_chip() - Reset ISP chip.
2db6228d 2438 * @vha: HA context
1da177e4
LT
2439 *
2440 * Returns 0 on success.
2441 */
abbd8870 2442void
e315cd28 2443qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
2444{
2445 unsigned long flags = 0;
e315cd28 2446 struct qla_hw_data *ha = vha->hw;
3d71644c 2447 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2448 uint32_t cnt;
1da177e4
LT
2449 uint16_t cmd;
2450
85880801
AV
2451 if (unlikely(pci_channel_offline(ha->pdev)))
2452 return;
2453
fd34f556 2454 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
2455
2456 spin_lock_irqsave(&ha->hardware_lock, flags);
2457
2458 /* Turn off master enable */
2459 cmd = 0;
2460 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2461 cmd &= ~PCI_COMMAND_MASTER;
2462 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2463
2464 if (!IS_QLA2100(ha)) {
2465 /* Pause RISC. */
2466 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
2467 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2468 for (cnt = 0; cnt < 30000; cnt++) {
2469 if ((RD_REG_WORD(&reg->hccr) &
2470 HCCR_RISC_PAUSE) != 0)
2471 break;
2472 udelay(100);
2473 }
2474 } else {
2475 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2476 udelay(10);
2477 }
2478
2479 /* Select FPM registers. */
2480 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2481 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2482
2483 /* FPM Soft Reset. */
2484 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
2485 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2486
2487 /* Toggle Fpm Reset. */
2488 if (!IS_QLA2200(ha)) {
2489 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
2490 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2491 }
2492
2493 /* Select frame buffer registers. */
2494 WRT_REG_WORD(&reg->ctrl_status, 0x10);
2495 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2496
2497 /* Reset frame buffer FIFOs. */
2498 if (IS_QLA2200(ha)) {
2499 WRT_FB_CMD_REG(ha, reg, 0xa000);
2500 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2501 } else {
2502 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2503
2504 /* Read back fb_cmd until zero or 3 seconds max */
2505 for (cnt = 0; cnt < 3000; cnt++) {
2506 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2507 break;
2508 udelay(100);
2509 }
2510 }
2511
2512 /* Select RISC module registers. */
2513 WRT_REG_WORD(&reg->ctrl_status, 0);
2514 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2515
2516 /* Reset RISC processor. */
2517 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2518 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2519
2520 /* Release RISC processor. */
2521 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2522 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2523 }
2524
2525 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
2526 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
2527
2528 /* Reset ISP chip. */
2529 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2530
2531 /* Wait for RISC to recover from reset. */
2532 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2533 /*
 2534		 * It is necessary to have a delay here since the card doesn't
2535 * respond to PCI reads during a reset. On some architectures
2536 * this will result in an MCA.
2537 */
2538 udelay(20);
2539 for (cnt = 30000; cnt; cnt--) {
2540 if ((RD_REG_WORD(&reg->ctrl_status) &
2541 CSR_ISP_SOFT_RESET) == 0)
2542 break;
2543 udelay(100);
2544 }
2545 } else
2546 udelay(10);
2547
2548 /* Reset RISC processor. */
2549 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2550
2551 WRT_REG_WORD(&reg->semaphore, 0);
2552
2553 /* Release RISC processor. */
2554 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2555 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2556
2557 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2558 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 2559 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 2560 break;
1da177e4
LT
2561
2562 udelay(100);
2563 }
2564 } else
2565 udelay(100);
2566
2567 /* Turn on master enable */
2568 cmd |= PCI_COMMAND_MASTER;
2569 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2570
2571 /* Disable RISC pause on FPM parity error. */
2572 if (!IS_QLA2100(ha)) {
2573 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
2574 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2575 }
2576
2577 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2578}
2579
b1d46989
MI
2580/**
2581 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2db6228d 2582 * @vha: HA context
b1d46989
MI
2583 *
2584 * Returns 0 on success.
2585 */
fa492630 2586static int
b1d46989
MI
2587qla81xx_reset_mpi(scsi_qla_host_t *vha)
2588{
2589 uint16_t mb[4] = {0x1010, 0, 1, 0};
2590
6246b8a1
GM
2591 if (!IS_QLA81XX(vha->hw))
2592 return QLA_SUCCESS;
2593
b1d46989
MI
2594 return qla81xx_write_mpi_register(vha, mb);
2595}
2596
0107109e 2597/**
88c26663 2598 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2db6228d 2599 * @vha: HA context
0107109e
AV
2600 *
2601 * Returns 0 on success.
2602 */
d14e72fb 2603static inline int
e315cd28 2604qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
2605{
2606 unsigned long flags = 0;
e315cd28 2607 struct qla_hw_data *ha = vha->hw;
0107109e 2608 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
52c82823 2609 uint32_t cnt;
335a1cc9 2610 uint16_t wd;
b1d46989 2611 static int abts_cnt; /* ISP abort retry counts */
d14e72fb 2612 int rval = QLA_SUCCESS;
0107109e 2613
0107109e
AV
2614 spin_lock_irqsave(&ha->hardware_lock, flags);
2615
2616 /* Reset RISC. */
2617 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2618 for (cnt = 0; cnt < 30000; cnt++) {
2619 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2620 break;
2621
2622 udelay(10);
2623 }
2624
d14e72fb
HM
2625 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
2626 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2627
2628 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2629 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2630 RD_REG_DWORD(&reg->hccr),
2631 RD_REG_DWORD(&reg->ctrl_status),
2632 (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
2633
0107109e
AV
2634 WRT_REG_DWORD(&reg->ctrl_status,
2635 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 2636 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 2637
335a1cc9 2638 udelay(100);
d14e72fb 2639
88c26663 2640 /* Wait for firmware to complete NVRAM accesses. */
52c82823 2641 RD_REG_WORD(&reg->mailbox0);
d14e72fb
HM
2642 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2643 rval == QLA_SUCCESS; cnt--) {
88c26663 2644 barrier();
d14e72fb
HM
2645 if (cnt)
2646 udelay(5);
2647 else
2648 rval = QLA_FUNCTION_TIMEOUT;
88c26663
AV
2649 }
2650
d14e72fb
HM
2651 if (rval == QLA_SUCCESS)
2652 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2653
2654 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2655 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2656 RD_REG_DWORD(&reg->hccr),
2657 RD_REG_DWORD(&reg->mailbox0));
2658
335a1cc9 2659 /* Wait for soft-reset to complete. */
52c82823 2660 RD_REG_DWORD(&reg->ctrl_status);
200ffb15 2661 for (cnt = 0; cnt < 60; cnt++) {
0107109e 2662 barrier();
d14e72fb
HM
2663 if ((RD_REG_DWORD(&reg->ctrl_status) &
2664 CSRX_ISP_SOFT_RESET) == 0)
2665 break;
2666
2667 udelay(5);
0107109e 2668 }
d14e72fb
HM
2669 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
2670 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2671
2672 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2673 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2674 RD_REG_DWORD(&reg->hccr),
2675 RD_REG_DWORD(&reg->ctrl_status));
0107109e 2676
b1d46989
MI
2677 /* If required, do an MPI FW reset now */
2678 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2679 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2680 if (++abts_cnt < 5) {
2681 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2682 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2683 } else {
2684 /*
2685 * We exhausted the ISP abort retries. We have to
2686 * set the board offline.
2687 */
2688 abts_cnt = 0;
2689 vha->flags.online = 0;
2690 }
2691 }
2692 }
2693
0107109e
AV
2694 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2695 RD_REG_DWORD(&reg->hccr);
2696
2697 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2698 RD_REG_DWORD(&reg->hccr);
2699
2700 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2701 RD_REG_DWORD(&reg->hccr);
2702
52c82823 2703 RD_REG_WORD(&reg->mailbox0);
200ffb15 2704 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
d14e72fb 2705 rval == QLA_SUCCESS; cnt--) {
0107109e 2706 barrier();
d14e72fb
HM
2707 if (cnt)
2708 udelay(5);
2709 else
2710 rval = QLA_FUNCTION_TIMEOUT;
0107109e 2711 }
d14e72fb
HM
2712 if (rval == QLA_SUCCESS)
2713 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2714
2715 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2716 "Host Risc 0x%x, mailbox0 0x%x\n",
2717 RD_REG_DWORD(&reg->hccr),
2718 RD_REG_WORD(&reg->mailbox0));
0107109e
AV
2719
2720 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6 2721
d14e72fb
HM
2722 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2723 "Driver in %s mode\n",
2724 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2725
124f85e6
AV
2726 if (IS_NOPOLLING_TYPE(ha))
2727 ha->isp_ops->enable_intrs(ha);
d14e72fb
HM
2728
2729 return rval;
0107109e
AV
2730}
2731
4ea2c9c7
JC
2732static void
2733qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2734{
2735 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2736
2737 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2738 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2739
2740}
2741
2742static void
2743qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2744{
2745 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2746
2747 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2748 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2749}
2750
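/*
 * Work around a stuck RISC semaphore on 0x0175/0x0240 subsystem boards:
 * pause the RISC, then poll until the semaphore can be acquired, clearing
 * or forcing it if the hardware does not release it in time.
 */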
2751static void
2752qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2753{
4ea2c9c7
JC
2754 uint32_t wd32 = 0;
2755 uint delta_msec = 100;
2756 uint elapsed_msec = 0;
2757 uint timeout_msec;
2758 ulong n;
2759
cc790764
JC
2760 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2761 vha->hw->pdev->subsystem_device != 0x0240)
4ea2c9c7
JC
2762 return;
2763
8dd7e3a5
JC
2764 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2765 udelay(100);
2766
4ea2c9c7
JC
2767attempt:
2768 timeout_msec = TIMEOUT_SEMAPHORE;
2769 n = timeout_msec / delta_msec;
2770 while (n--) {
2771 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2772 qla25xx_read_risc_sema_reg(vha, &wd32);
2773 if (wd32 & RISC_SEMAPHORE)
2774 break;
2775 msleep(delta_msec);
2776 elapsed_msec += delta_msec;
2777 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2778 goto force;
2779 }
2780
2781 if (!(wd32 & RISC_SEMAPHORE))
2782 goto force;
2783
2784 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2785 goto acquired;
2786
2787 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2788 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2789 n = timeout_msec / delta_msec;
2790 while (n--) {
2791 qla25xx_read_risc_sema_reg(vha, &wd32);
2792 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2793 break;
2794 msleep(delta_msec);
2795 elapsed_msec += delta_msec;
2796 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2797 goto force;
2798 }
2799
2800 if (wd32 & RISC_SEMAPHORE_FORCE)
2801 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2802
2803 goto attempt;
2804
2805force:
2806 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2807
2808acquired:
2809 return;
2810}
2811
88c26663
AV
2812/**
2813 * qla24xx_reset_chip() - Reset ISP24xx chip.
2db6228d 2814 * @vha: HA context
88c26663
AV
2815 *
2816 * Returns 0 on success.
2817 */
2818void
e315cd28 2819qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 2820{
e315cd28 2821 struct qla_hw_data *ha = vha->hw;
85880801
AV
2822
2823 if (pci_channel_offline(ha->pdev) &&
2824 ha->flags.pci_channel_io_perm_failure) {
2825 return;
2826 }
2827
fd34f556 2828 ha->isp_ops->disable_intrs(ha);
88c26663 2829
4ea2c9c7
JC
2830 qla25xx_manipulate_risc_semaphore(vha);
2831
88c26663 2832 /* Perform RISC reset. */
e315cd28 2833 qla24xx_reset_risc(vha);
88c26663
AV
2834}
2835
1da177e4
LT
2836/**
2837 * qla2x00_chip_diag() - Test chip for proper operation.
2db6228d 2838 * @vha: HA context
1da177e4
LT
2839 *
2840 * Returns 0 on success.
2841 */
abbd8870 2842int
e315cd28 2843qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
2844{
2845 int rval;
e315cd28 2846 struct qla_hw_data *ha = vha->hw;
3d71644c 2847 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
2848 unsigned long flags = 0;
2849 uint16_t data;
2850 uint32_t cnt;
2851 uint16_t mb[5];
73208dfd 2852 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
2853
2854 /* Assume a failed state */
2855 rval = QLA_FUNCTION_FAILED;
2856
da4704d9
BVA
2857 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
2858 &reg->flash_address);
1da177e4
LT
2859
2860 spin_lock_irqsave(&ha->hardware_lock, flags);
2861
2862 /* Reset ISP chip. */
2863 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2864
2865 /*
2866 * We need to have a delay here since the card will not respond while
 2867	 * in reset, which causes an MCA on some architectures.
2868 */
2869 udelay(20);
2870 data = qla2x00_debounce_register(&reg->ctrl_status);
2871 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2872 udelay(5);
2873 data = RD_REG_WORD(&reg->ctrl_status);
2874 barrier();
2875 }
2876
2877 if (!cnt)
2878 goto chip_diag_failed;
2879
7c3df132
SK
2880 ql_dbg(ql_dbg_init, vha, 0x007c,
2881 "Reset register cleared by chip reset.\n");
1da177e4
LT
2882
2883 /* Reset RISC processor. */
2884 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2885 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2886
2887 /* Workaround for QLA2312 PCI parity error */
2888 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2889 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2890 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2891 udelay(5);
2892 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 2893 barrier();
1da177e4
LT
2894 }
2895 } else
2896 udelay(10);
2897
2898 if (!cnt)
2899 goto chip_diag_failed;
2900
2901 /* Check product ID of chip */
5a68a1c2 2902 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
1da177e4
LT
2903
2904 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
2905 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
2906 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
2907 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
2908 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
2909 mb[3] != PROD_ID_3) {
7c3df132
SK
2910 ql_log(ql_log_warn, vha, 0x0062,
2911 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
2912 mb[1], mb[2], mb[3]);
1da177e4
LT
2913
2914 goto chip_diag_failed;
2915 }
2916 ha->product_id[0] = mb[1];
2917 ha->product_id[1] = mb[2];
2918 ha->product_id[2] = mb[3];
2919 ha->product_id[3] = mb[4];
2920
2921 /* Adjust fw RISC transfer size */
73208dfd 2922 if (req->length > 1024)
1da177e4
LT
2923 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
2924 else
2925 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 2926 req->length;
1da177e4
LT
2927
2928 if (IS_QLA2200(ha) &&
2929 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
2930 /* Limit firmware transfer size with a 2200A */
7c3df132 2931 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 2932
ea5b6382 2933 ha->device_type |= DT_ISP2200A;
1da177e4
LT
2934 ha->fw_transfer_size = 128;
2935 }
2936
2937 /* Wrap Incoming Mailboxes Test. */
2938 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2939
7c3df132 2940 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 2941 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
2942 if (rval)
2943 ql_log(ql_log_warn, vha, 0x0080,
2944 "Failed mailbox send register test.\n");
2945 else
1da177e4
LT
2946 /* Flag a successful rval */
2947 rval = QLA_SUCCESS;
1da177e4
LT
2948 spin_lock_irqsave(&ha->hardware_lock, flags);
2949
2950chip_diag_failed:
2951 if (rval)
7c3df132
SK
2952 ql_log(ql_log_info, vha, 0x0081,
2953 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
2954
2955 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2956
2957 return (rval);
2958}
2959
0107109e
AV
2960/**
2961 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2db6228d 2962 * @vha: HA context
0107109e
AV
2963 *
2964 * Returns 0 on success.
2965 */
2966int
e315cd28 2967qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
2968{
2969 int rval;
e315cd28 2970 struct qla_hw_data *ha = vha->hw;
73208dfd 2971 struct req_que *req = ha->req_q_map[0];
0107109e 2972
7ec0effd 2973 if (IS_P3P_TYPE(ha))
a9083016
GM
2974 return QLA_SUCCESS;
2975
73208dfd 2976 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 2977
e315cd28 2978 rval = qla2x00_mbx_reg_test(vha);
0107109e 2979 if (rval) {
7c3df132
SK
2980 ql_log(ql_log_warn, vha, 0x0082,
2981 "Failed mailbox send register test.\n");
0107109e
AV
2982 } else {
2983 /* Flag a successful rval */
2984 rval = QLA_SUCCESS;
2985 }
2986
2987 return rval;
2988}
2989
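/**
 * qla2x00_alloc_offload_mem() - Allocate FCE and EFT trace buffers.
 * @vha: HA context
 *
 * On FWI2-capable adapters, allocates DMA-coherent buffers for the Fibre
 * Channel Event buffer and the Extended Trace buffer and enables tracing
 * in the firmware. Does nothing if the buffers already exist.
 */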
ad0a0b01
QT
2990static void
2991qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
0107109e 2992{
a7a167bf 2993 int rval;
df613b96
AV
2994 dma_addr_t tc_dma;
2995 void *tc;
e315cd28 2996 struct qla_hw_data *ha = vha->hw;
a7a167bf 2997
ad0a0b01 2998 if (ha->eft) {
7c3df132 2999 ql_dbg(ql_dbg_init, vha, 0x00bd,
ad0a0b01
QT
3000 "%s: Offload Mem is already allocated.\n",
3001 __func__);
a7a167bf
AV
3002 return;
3003 }
d4e3e04d 3004
ad0a0b01 3005 if (IS_FWI2_CAPABLE(ha)) {
df613b96 3006 /* Allocate memory for Fibre Channel Event Buffer. */
f73cb695
CD
3007 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3008 !IS_QLA27XX(ha))
436a7b11 3009 goto try_eft;
df613b96 3010
f73cb695
CD
3011 if (ha->fce)
3012 dma_free_coherent(&ha->pdev->dev,
3013 FCE_SIZE, ha->fce, ha->fce_dma);
3014
3015 /* Allocate memory for Fibre Channel Event Buffer. */
0ea85b50
JP
3016 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3017 GFP_KERNEL);
df613b96 3018 if (!tc) {
7c3df132
SK
3019 ql_log(ql_log_warn, vha, 0x00be,
3020 "Unable to allocate (%d KB) for FCE.\n",
3021 FCE_SIZE / 1024);
17d98630 3022 goto try_eft;
df613b96
AV
3023 }
3024
e315cd28 3025 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
3026 ha->fce_mb, &ha->fce_bufs);
3027 if (rval) {
7c3df132
SK
3028 ql_log(ql_log_warn, vha, 0x00bf,
3029 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
3030 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
3031 tc_dma);
3032 ha->flags.fce_enabled = 0;
17d98630 3033 goto try_eft;
df613b96 3034 }
cfb0919c 3035 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 3036 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 3037
df613b96
AV
3038 ha->flags.fce_enabled = 1;
3039 ha->fce_dma = tc_dma;
3040 ha->fce = tc;
f73cb695 3041
436a7b11 3042try_eft:
f73cb695
CD
3043 if (ha->eft)
3044 dma_free_coherent(&ha->pdev->dev,
3045 EFT_SIZE, ha->eft, ha->eft_dma);
3046
436a7b11 3047 /* Allocate memory for Extended Trace Buffer. */
0ea85b50
JP
3048 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3049 GFP_KERNEL);
436a7b11 3050 if (!tc) {
7c3df132
SK
3051 ql_log(ql_log_warn, vha, 0x00c1,
3052 "Unable to allocate (%d KB) for EFT.\n",
3053 EFT_SIZE / 1024);
ad0a0b01 3054 goto eft_err;
436a7b11
AV
3055 }
3056
e315cd28 3057 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 3058 if (rval) {
7c3df132
SK
3059 ql_log(ql_log_warn, vha, 0x00c2,
3060 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
3061 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
3062 tc_dma);
ad0a0b01 3063 goto eft_err;
436a7b11 3064 }
cfb0919c 3065 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 3066 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11 3067
436a7b11
AV
3068 ha->eft_dma = tc_dma;
3069 ha->eft = tc;
d4e3e04d 3070 }
f73cb695 3071
ad0a0b01
QT
3072eft_err:
3073 return;
3074}
3075
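/**
 * qla2x00_alloc_fw_dump() - Size and allocate the firmware dump buffer.
 * @vha: HA context
 *
 * Computes the dump size from the ISP type, firmware memory size, queue
 * lengths and any offload buffers, then (re)allocates the vmalloc'ed dump
 * area and seeds its header for non-ISP27xx adapters.
 */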
3076void
3077qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3078{
3079 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3080 eft_size, fce_size, mq_size;
3081 struct qla_hw_data *ha = vha->hw;
3082 struct req_que *req = ha->req_q_map[0];
3083 struct rsp_que *rsp = ha->rsp_q_map[0];
3084 struct qla2xxx_fw_dump *fw_dump;
3085
3086 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3087 req_q_size = rsp_q_size = 0;
3088
3089 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3090 fixed_size = sizeof(struct qla2100_fw_dump);
3091 } else if (IS_QLA23XX(ha)) {
3092 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3093 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3094 sizeof(uint16_t);
3095 } else if (IS_FWI2_CAPABLE(ha)) {
3096 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3097 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3098 else if (IS_QLA81XX(ha))
3099 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3100 else if (IS_QLA25XX(ha))
3101 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3102 else
3103 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3104
3105 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3106 sizeof(uint32_t);
3107 if (ha->mqenable) {
3108 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3109 mq_size = sizeof(struct qla2xxx_mq_chain);
3110 /*
3111 * Allocate maximum buffer size for all queues.
3112 * Resizing must be done at end-of-dump processing.
3113 */
3114 mq_size += ha->max_req_queues *
3115 (req->length * sizeof(request_t));
3116 mq_size += ha->max_rsp_queues *
3117 (rsp->length * sizeof(response_t));
3118 }
3119 if (ha->tgt.atio_ring)
3120 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3121 /* Allocate memory for Fibre Channel Event Buffer. */
3122 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3123 !IS_QLA27XX(ha))
3124 goto try_eft;
3125
3126 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3127try_eft:
3128 ql_dbg(ql_dbg_init, vha, 0x00c3,
3129 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3130 eft_size = EFT_SIZE;
3131 }
3132
f73cb695
CD
3133 if (IS_QLA27XX(ha)) {
3134 if (!ha->fw_dump_template) {
3135 ql_log(ql_log_warn, vha, 0x00ba,
3136 "Failed missing fwdump template\n");
3137 return;
3138 }
3139 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
3140 ql_dbg(ql_dbg_init, vha, 0x00fa,
3141 "-> allocating fwdump (%x bytes)...\n", dump_size);
3142 goto allocate;
3143 }
3144
73208dfd
AC
3145 req_q_size = req->length * sizeof(request_t);
3146 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf 3147 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 3148 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
3149 ha->chain_offset = dump_size;
3150 dump_size += mq_size + fce_size;
d4e3e04d 3151
b945e777
QT
3152 if (ha->exchoffld_buf)
3153 dump_size += sizeof(struct qla2xxx_offld_chain) +
3154 ha->exchoffld_size;
3155 if (ha->exlogin_buf)
3156 dump_size += sizeof(struct qla2xxx_offld_chain) +
3157 ha->exlogin_size;
3158
f73cb695 3159allocate:
ad0a0b01
QT
3160 if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
3161 fw_dump = vmalloc(dump_size);
3162 if (!fw_dump) {
3163 ql_log(ql_log_warn, vha, 0x00c4,
3164 "Unable to allocate (%d KB) for firmware dump.\n",
3165 dump_size / 1024);
3166 } else {
3167 if (ha->fw_dump)
3168 vfree(ha->fw_dump);
3169 ha->fw_dump = fw_dump;
3170
3171 ha->fw_dump_len = dump_size;
3172 ql_dbg(ql_dbg_init, vha, 0x00c5,
3173 "Allocated (%d KB) for firmware dump.\n",
3174 dump_size / 1024);
3175
3176 if (IS_QLA27XX(ha))
3177 return;
3178
3179 ha->fw_dump->signature[0] = 'Q';
3180 ha->fw_dump->signature[1] = 'L';
3181 ha->fw_dump->signature[2] = 'G';
3182 ha->fw_dump->signature[3] = 'C';
3183 ha->fw_dump->version = htonl(1);
3184
3185 ha->fw_dump->fixed_size = htonl(fixed_size);
3186 ha->fw_dump->mem_size = htonl(mem_size);
3187 ha->fw_dump->req_q_size = htonl(req_q_size);
3188 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3189
3190 ha->fw_dump->eft_size = htonl(eft_size);
3191 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
3192 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
3193
3194 ha->fw_dump->header_size =
3195 htonl(offsetof(struct qla2xxx_fw_dump, isp));
a7a167bf 3196 }
a7a167bf 3197 }
0107109e
AV
3198}
3199
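/*
 * Synchronize the MPS bits between PCI config space (offset 0x54) and
 * firmware RAM word 0x7a15 on ISP81xx, holding the 0x7c00 semaphore
 * around the update.
 */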
18e7555a
AV
3200static int
3201qla81xx_mpi_sync(scsi_qla_host_t *vha)
3202{
3203#define MPS_MASK 0xe0
3204 int rval;
3205 uint16_t dc;
3206 uint32_t dw;
18e7555a
AV
3207
3208 if (!IS_QLA81XX(vha->hw))
3209 return QLA_SUCCESS;
3210
3211 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3212 if (rval != QLA_SUCCESS) {
7c3df132
SK
3213 ql_log(ql_log_warn, vha, 0x0105,
3214 "Unable to acquire semaphore.\n");
18e7555a
AV
3215 goto done;
3216 }
3217
3218 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3219 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3220 if (rval != QLA_SUCCESS) {
7c3df132 3221 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
3222 goto done_release;
3223 }
3224
3225 dc &= MPS_MASK;
3226 if (dc == (dw & MPS_MASK))
3227 goto done_release;
3228
3229 dw &= ~MPS_MASK;
3230 dw |= dc;
3231 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3232 if (rval != QLA_SUCCESS) {
7c3df132 3233 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
3234 }
3235
3236done_release:
3237 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3238 if (rval != QLA_SUCCESS) {
7c3df132
SK
3239 ql_log(ql_log_warn, vha, 0x006d,
3240 "Unable to release semaphore.\n");
18e7555a
AV
3241 }
3242
3243done:
3244 return rval;
3245}
3246
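/**
 * qla2x00_alloc_outstanding_cmds() - Allocate a request queue's srb array.
 * @ha: HW context
 * @req: request queue
 *
 * Sizes the outstanding-command array from the firmware exchange/IOCB
 * counts (or the legacy default) and falls back to a minimal array if the
 * full allocation fails.
 */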
8d93f550
CD
3247int
3248qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3249{
3250 /* Don't try to reallocate the array */
3251 if (req->outstanding_cmds)
3252 return QLA_SUCCESS;
3253
d7459527 3254 if (!IS_FWI2_CAPABLE(ha))
8d93f550
CD
3255 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3256 else {
03e8c680
QT
3257 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3258 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
8d93f550 3259 else
03e8c680 3260 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
8d93f550
CD
3261 }
3262
6396bb22
KC
3263 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3264 sizeof(srb_t *),
3265 GFP_KERNEL);
8d93f550
CD
3266
3267 if (!req->outstanding_cmds) {
3268 /*
3269 * Try to allocate a minimal size just so we can get through
3270 * initialization.
3271 */
3272 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
6396bb22
KC
3273 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3274 sizeof(srb_t *),
3275 GFP_KERNEL);
8d93f550
CD
3276
3277 if (!req->outstanding_cmds) {
3278 ql_log(ql_log_fatal, NULL, 0x0126,
3279 "Failed to allocate memory for "
3280 "outstanding_cmds for req_que %p.\n", req);
3281 req->num_outstanding_cmds = 0;
3282 return QLA_FUNCTION_FAILED;
3283 }
3284 }
3285
3286 return QLA_SUCCESS;
3287}
3288
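/*
 * PRINT_FIELD() - append @_str to the string at 'ptr' when bit @_flag is
 * set in SFP field @_field, separating entries with '|'.
 */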
e4e3a2ce
QT
3289#define PRINT_FIELD(_field, _flag, _str) { \
3290 if (a0->_field & _flag) {\
3291 if (p) {\
3292 strcat(ptr, "|");\
3293 ptr++;\
3294 leftover--;\
3295 } \
3296 len = snprintf(ptr, leftover, "%s", _str); \
3297 p = 1;\
3298 leftover -= len;\
3299 ptr += len; \
3300 } \
3301}
3302
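/*
 * Decode and log the SFP's A0 identification page: vendor and part name,
 * media type, link-length capabilities and supported distances.
 */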
3303static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3304{
3305#define STR_LEN 64
3306 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3307 u8 str[STR_LEN], *ptr, p;
3308 int leftover, len;
3309
3310 memset(str, 0, STR_LEN);
3311 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3312 ql_dbg(ql_dbg_init, vha, 0x015a,
3313 "SFP MFG Name: %s\n", str);
3314
3315 memset(str, 0, STR_LEN);
3316 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3317 ql_dbg(ql_dbg_init, vha, 0x015c,
3318 "SFP Part Name: %s\n", str);
3319
3320 /* media */
3321 memset(str, 0, STR_LEN);
3322 ptr = str;
3323 leftover = STR_LEN;
3324 p = len = 0;
3325 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3326 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3327 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3328 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3329 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3330 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3331 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3332 ql_dbg(ql_dbg_init, vha, 0x0160,
3333 "SFP Media: %s\n", str);
3334
3335 /* link length */
3336 memset(str, 0, STR_LEN);
3337 ptr = str;
3338 leftover = STR_LEN;
3339 p = len = 0;
3340 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3341 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3342 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3343 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3344 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3345 ql_dbg(ql_dbg_init, vha, 0x0196,
3346 "SFP Link Length: %s\n", str);
3347
3348 memset(str, 0, STR_LEN);
3349 ptr = str;
3350 leftover = STR_LEN;
3351 p = len = 0;
3352 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3353 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3354 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3355 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3356 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3357 ql_dbg(ql_dbg_init, vha, 0x016e,
3358 "SFP FC Link Tech: %s\n", str);
3359
3360 if (a0->length_km)
3361 ql_dbg(ql_dbg_init, vha, 0x016f,
3362 "SFP Distant: %d km\n", a0->length_km);
3363 if (a0->length_100m)
3364 ql_dbg(ql_dbg_init, vha, 0x0170,
3365 "SFP Distant: %d m\n", a0->length_100m*100);
3366 if (a0->length_50um_10m)
3367 ql_dbg(ql_dbg_init, vha, 0x0189,
3368 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3369 if (a0->length_62um_10m)
3370 ql_dbg(ql_dbg_init, vha, 0x018a,
3371 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3372 if (a0->length_om4_10m)
3373 ql_dbg(ql_dbg_init, vha, 0x0194,
3374 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3375 if (a0->length_om3_10m)
3376 ql_dbg(ql_dbg_init, vha, 0x0195,
3377 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3378}
3379
3380
3381/*
3382 * Return Code:
3383 * QLA_SUCCESS: no action
3384 * QLA_INTERFACE_ERROR: SFP is not there.
3385 * QLA_FUNCTION_FAILED: detected New SFP
3386 */
3387int
3388qla24xx_detect_sfp(scsi_qla_host_t *vha)
3389{
3390 int rc = QLA_SUCCESS;
3391 struct sff_8247_a0 *a;
3392 struct qla_hw_data *ha = vha->hw;
3393
3394 if (!AUTO_DETECT_SFP_SUPPORT(vha))
3395 goto out;
3396
3397 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3398 if (rc)
3399 goto out;
3400
3401 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3402 qla2xxx_print_sfp_info(vha);
3403
3404 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
3405 /* long range */
3406 ha->flags.detected_lr_sfp = 1;
3407
3408 if (a->length_km > 5 || a->length_100m > 50)
3409 ha->long_range_distance = LR_DISTANCE_10K;
3410 else
3411 ha->long_range_distance = LR_DISTANCE_5K;
3412
3413 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3414 ql_dbg(ql_dbg_async, vha, 0x507b,
3415 "Detected Long Range SFP.\n");
3416 } else {
3417 /* short range */
3418 ha->flags.detected_lr_sfp = 0;
3419 if (ha->flags.using_lr_setting)
3420 ql_dbg(ql_dbg_async, vha, 0x5084,
3421 "Detected Short Range SFP.\n");
3422 }
3423
3424 if (!vha->flags.init_done)
3425 rc = QLA_SUCCESS;
3426out:
3427 return rc;
3428}
3429
1da177e4
LT
3430/**
3431 * qla2x00_setup_chip() - Load and start RISC firmware.
2db6228d 3432 * @vha: HA context
1da177e4
LT
3433 *
3434 * Returns 0 on success.
3435 */
3436static int
e315cd28 3437qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 3438{
0107109e
AV
3439 int rval;
3440 uint32_t srisc_address = 0;
e315cd28 3441 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
3442 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3443 unsigned long flags;
dda772e8 3444 uint16_t fw_major_version;
3db0652e 3445
7ec0effd 3446 if (IS_P3P_TYPE(ha)) {
a9083016 3447 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
3448 if (rval == QLA_SUCCESS) {
3449 qla2x00_stop_firmware(vha);
a9083016 3450 goto enable_82xx_npiv;
14e303d9 3451 } else
b963752f 3452 goto failed;
a9083016
GM
3453 }
3454
3db0652e
AV
3455 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3456 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3457 spin_lock_irqsave(&ha->hardware_lock, flags);
3458 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
3459 RD_REG_WORD(&reg->hccr);
3460 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3461 }
1da177e4 3462
18e7555a
AV
3463 qla81xx_mpi_sync(vha);
3464
1da177e4 3465 /* Load firmware sequences */
e315cd28 3466 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 3467 if (rval == QLA_SUCCESS) {
7c3df132
SK
3468 ql_dbg(ql_dbg_init, vha, 0x00c9,
3469 "Verifying Checksum of loaded RISC code.\n");
1da177e4 3470
e315cd28 3471 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
3472 if (rval == QLA_SUCCESS) {
3473 /* Start firmware execution. */
7c3df132
SK
3474 ql_dbg(ql_dbg_init, vha, 0x00ca,
3475 "Starting firmware.\n");
1da177e4 3476
b0d6cabd
HM
3477 if (ql2xexlogins)
3478 ha->flags.exlogins_enabled = 1;
3479
99e1b683 3480 if (qla_is_exch_offld_enabled(vha))
2f56a7f1
HM
3481 ha->flags.exchoffld_enabled = 1;
3482
e315cd28 3483 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 3484 /* Retrieve firmware information. */
dda772e8 3485 if (rval == QLA_SUCCESS) {
e4e3a2ce
QT
3486 qla24xx_detect_sfp(vha);
3487
8b4673ba
QT
3488 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3489 (ha->zio_mode == QLA_ZIO_MODE_6))
3490 qla27xx_set_zio_threshold(vha,
3491 ha->last_zio_threshold);
3492
b0d6cabd
HM
3493 rval = qla2x00_set_exlogins_buffer(vha);
3494 if (rval != QLA_SUCCESS)
3495 goto failed;
3496
2f56a7f1
HM
3497 rval = qla2x00_set_exchoffld_buffer(vha);
3498 if (rval != QLA_SUCCESS)
3499 goto failed;
3500
a9083016 3501enable_82xx_npiv:
dda772e8 3502 fw_major_version = ha->fw_major_version;
7ec0effd 3503 if (IS_P3P_TYPE(ha))
3173167f 3504 qla82xx_check_md_needed(vha);
6246b8a1
GM
3505 else
3506 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
3507 if (rval != QLA_SUCCESS)
3508 goto failed;
2c3dfe3f 3509 ha->flags.npiv_supported = 0;
e315cd28 3510 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 3511 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 3512 ha->flags.npiv_supported = 1;
4d0ea247
SJ
3513 if ((!ha->max_npiv_vports) ||
3514 ((ha->max_npiv_vports + 1) %
eb66dc60 3515 MIN_MULTI_ID_FABRIC))
4d0ea247 3516 ha->max_npiv_vports =
eb66dc60 3517 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 3518 }
03e8c680 3519 qla2x00_get_resource_cnts(vha);
d743de66 3520
8d93f550
CD
3521 /*
3522 * Allocate the array of outstanding commands
3523 * now that we know the firmware resources.
3524 */
3525 rval = qla2x00_alloc_outstanding_cmds(ha,
3526 vha->req);
3527 if (rval != QLA_SUCCESS)
3528 goto failed;
3529
ad0a0b01
QT
3530 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
3531 qla2x00_alloc_offload_mem(vha);
3532
3533 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
08de2844 3534 qla2x00_alloc_fw_dump(vha);
ad0a0b01 3535
3b6e5b9d
CD
3536 } else {
3537 goto failed;
1da177e4
LT
3538 }
3539 } else {
7c3df132
SK
3540 ql_log(ql_log_fatal, vha, 0x00cd,
3541 "ISP Firmware failed checksum.\n");
3542 goto failed;
1da177e4 3543 }
c74d88a4
AV
3544 } else
3545 goto failed;
1da177e4 3546
3db0652e
AV
3547 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3548 /* Enable proper parity. */
3549 spin_lock_irqsave(&ha->hardware_lock, flags);
3550 if (IS_QLA2300(ha))
3551 /* SRAM parity */
3552 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
3553 else
3554 /* SRAM, Instruction RAM and GP RAM parity */
3555 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
3556 RD_REG_WORD(&reg->hccr);
3557 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3558 }
3559
f3982d89
CD
3560 if (IS_QLA27XX(ha))
3561 ha->flags.fac_supported = 1;
3562 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1d2874de
JC
3563 uint32_t size;
3564
3565 rval = qla81xx_fac_get_sector_size(vha, &size);
3566 if (rval == QLA_SUCCESS) {
3567 ha->flags.fac_supported = 1;
3568 ha->fdt_block_size = size << 2;
3569 } else {
7c3df132 3570 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
3571 "Unsupported FAC firmware (%d.%02d.%02d).\n",
3572 ha->fw_major_version, ha->fw_minor_version,
3573 ha->fw_subminor_version);
1ca60e3b 3574
f73cb695 3575 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6246b8a1
GM
3576 ha->flags.fac_supported = 0;
3577 rval = QLA_SUCCESS;
3578 }
1d2874de
JC
3579 }
3580 }
ca9e9c3e 3581failed:
1da177e4 3582 if (rval) {
7c3df132
SK
3583 ql_log(ql_log_fatal, vha, 0x00cf,
3584 "Setup chip ****FAILED****.\n");
1da177e4
LT
3585 }
3586
3587 return (rval);
3588}
3589
3590/**
3591 * qla2x00_init_response_q_entries() - Initializes response queue entries.
2db6228d 3592 * @rsp: response queue
1da177e4
LT
3593 *
3594 * Beginning of request ring has initialization control block already built
3595 * by nvram config routine.
3596 *
3597 * Returns 0 on success.
3598 */
73208dfd
AC
3599void
3600qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
3601{
3602 uint16_t cnt;
3603 response_t *pkt;
3604
2afa19a9
AC
3605 rsp->ring_ptr = rsp->ring;
3606 rsp->ring_index = 0;
3607 rsp->status_srb = NULL;
e315cd28
AC
3608 pkt = rsp->ring_ptr;
3609 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
3610 pkt->signature = RESPONSE_PROCESSED;
3611 pkt++;
3612 }
1da177e4
LT
3613}
3614
3615/**
3616 * qla2x00_update_fw_options() - Read and process firmware options.
2db6228d 3617 * @vha: HA context
1da177e4
LT
3618 *
3619 * Returns 0 on success.
3620 */
abbd8870 3621void
e315cd28 3622qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
3623{
3624 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 3625 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3626
3627 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 3628 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
3629
3630 if (IS_QLA2100(ha) || IS_QLA2200(ha))
3631 return;
3632
3633 /* Serial Link options. */
7c3df132
SK
3634 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
3635 "Serial link options.\n");
3636 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
3637 (uint8_t *)&ha->fw_seriallink_options,
3638 sizeof(ha->fw_seriallink_options));
1da177e4
LT
3639
3640 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
3641 if (ha->fw_seriallink_options[3] & BIT_2) {
3642 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
3643
3644 /* 1G settings */
3645 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
3646 emphasis = (ha->fw_seriallink_options[2] &
3647 (BIT_4 | BIT_3)) >> 3;
3648 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 3649 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3650 rx_sens = (ha->fw_seriallink_options[0] &
3651 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3652 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
3653 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3654 if (rx_sens == 0x0)
3655 rx_sens = 0x3;
3656 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
3657 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3658 ha->fw_options[10] |= BIT_5 |
3659 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3660 (tx_sens & (BIT_1 | BIT_0));
3661
3662 /* 2G settings */
3663 swing = (ha->fw_seriallink_options[2] &
3664 (BIT_7 | BIT_6 | BIT_5)) >> 5;
3665 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
3666 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 3667 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3668 rx_sens = (ha->fw_seriallink_options[1] &
3669 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3670 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
3671 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3672 if (rx_sens == 0x0)
3673 rx_sens = 0x3;
3674 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
3675 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3676 ha->fw_options[11] |= BIT_5 |
3677 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3678 (tx_sens & (BIT_1 | BIT_0));
3679 }
3680
3681 /* FCP2 options. */
3682 /* Return command IOCBs without waiting for an ABTS to complete. */
3683 ha->fw_options[3] |= BIT_13;
3684
3685 /* LED scheme. */
3686 if (ha->flags.enable_led_scheme)
3687 ha->fw_options[2] |= BIT_12;
3688
48c02fde 3689 /* Detect ISP6312. */
3690 if (IS_QLA6312(ha))
3691 ha->fw_options[2] |= BIT_13;
3692
088d09d4
GM
3693 /* Set Retry FLOGI in case of P2P connection */
3694 if (ha->operating_mode == P2P) {
3695 ha->fw_options[2] |= BIT_3;
3696 ql_dbg(ql_dbg_disc, vha, 0x2100,
3697 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3698 __func__, ha->fw_options[2]);
3699 }
3700
1da177e4 3701 /* Update firmware options. */
e315cd28 3702 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
3703}
3704
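/*
 * Illustrative sketch (not part of the driver): how the 1G serial-link
 * fields decoded above end up packed into ha->fw_options[10] on the
 * ISP2300/2312/6312 path.  Bit positions are taken from
 * qla2x00_update_fw_options(); the helper name is hypothetical.
 */
static inline uint16_t example_pack_1g_serial_link(uint16_t swing,
    uint16_t emphasis, uint16_t tx_sens, uint16_t rx_sens)
{
	/* emphasis -> bits 15:14, swing -> bits 10:8, tx -> 7:4, rx -> 3:0 */
	return (emphasis << 14) | (swing << 8) | (tx_sens << 4) | rx_sens;
}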
0107109e 3705void
e315cd28 3706qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
3707{
3708 int rval;
e315cd28 3709 struct qla_hw_data *ha = vha->hw;
0107109e 3710
7ec0effd 3711 if (IS_P3P_TYPE(ha))
a9083016
GM
3712 return;
3713
f198cafa
HM
3714 /* Hold status IOCBs until ABTS response received. */
3715 if (ql2xfwholdabts)
3716 ha->fw_options[3] |= BIT_12;
3717
088d09d4
GM
3718 /* Set Retry FLOGI in case of P2P connection */
3719 if (ha->operating_mode == P2P) {
3720 ha->fw_options[2] |= BIT_3;
3721 ql_dbg(ql_dbg_disc, vha, 0x2101,
3722 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3723 __func__, ha->fw_options[2]);
3724 }
3725
41dc529a 3726 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
3c4810ff
QT
3727 if (ql2xmvasynctoatio &&
3728 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
41dc529a
QT
3729 if (qla_tgt_mode_enabled(vha) ||
3730 qla_dual_mode_enabled(vha))
3731 ha->fw_options[2] |= BIT_11;
3732 else
3733 ha->fw_options[2] &= ~BIT_11;
3734 }
3735
f7e761f5
QT
3736 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3737 /*
3738 * Tell FW to track each exchange to prevent
3739 * driver from using stale exchange.
3740 */
3741 if (qla_tgt_mode_enabled(vha) ||
3742 qla_dual_mode_enabled(vha))
3743 ha->fw_options[2] |= BIT_4;
3744 else
3745 ha->fw_options[2] &= ~BIT_4;
9ecf0b0d
QT
3746
3747 /* Reserve 1/2 of emergency exchanges for ELS.*/
3748 if (qla2xuseresexchforels)
3749 ha->fw_options[2] |= BIT_8;
3750 else
3751 ha->fw_options[2] &= ~BIT_8;
f7e761f5
QT
3752 }
3753
83548fe2
QT
3754 ql_dbg(ql_dbg_init, vha, 0x00e8,
3755 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
3756 __func__, ha->fw_options[1], ha->fw_options[2],
3757 ha->fw_options[3], vha->host->active_mode);
3c4810ff
QT
3758
3759 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
3760 qla2x00_set_fw_options(vha, ha->fw_options);
41dc529a 3761
0107109e 3762 /* Update Serial Link options. */
f94097ed 3763 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
3764 return;
3765
e315cd28 3766 rval = qla2x00_set_serdes_params(vha,
f94097ed 3767 le16_to_cpu(ha->fw_seriallink_options24[1]),
3768 le16_to_cpu(ha->fw_seriallink_options24[2]),
3769 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 3770 if (rval != QLA_SUCCESS) {
7c3df132 3771 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
3772 "Unable to update Serial Link options (%x).\n", rval);
3773 }
3774}
3775
abbd8870 3776void
e315cd28 3777qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 3778{
e315cd28 3779 struct qla_hw_data *ha = vha->hw;
3d71644c 3780 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
3781 struct req_que *req = ha->req_q_map[0];
3782 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
3783
3784 /* Setup ring parameters in initialization control block. */
ad950360
BVA
3785 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
3786 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3787 ha->init_cb->request_q_length = cpu_to_le16(req->length);
3788 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
3789 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3790 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3791 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3792 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
abbd8870
AV
3793
3794 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
3795 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
3796 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
3797 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
3798 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
3799}
3800
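/*
 * Illustrative sketch (not part of the driver): the queue DMA addresses
 * written above are 64-bit values split into two little-endian 32-bit
 * ICB words via the LSD()/MSD() macros.  Open-coded equivalent, with a
 * hypothetical helper name:
 */
static inline void example_write_icb_addr(__le32 dest[2], dma_addr_t dma)
{
	dest[0] = cpu_to_le32(lower_32_bits(dma));	/* LSD(dma) */
	dest[1] = cpu_to_le32(upper_32_bits(dma));	/* MSD(dma) */
}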
0107109e 3801void
e315cd28 3802qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 3803{
e315cd28 3804 struct qla_hw_data *ha = vha->hw;
118e2ef9 3805 device_reg_t *reg = ISP_QUE_REG(ha, 0);
73208dfd
AC
3806 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3807 struct qla_msix_entry *msix;
0107109e 3808 struct init_cb_24xx *icb;
73208dfd
AC
3809 uint16_t rid = 0;
3810 struct req_que *req = ha->req_q_map[0];
3811 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 3812
6246b8a1 3813 /* Setup ring parameters in initialization control block. */
0107109e 3814 icb = (struct init_cb_24xx *)ha->init_cb;
ad950360
BVA
3815 icb->request_q_outpointer = cpu_to_le16(0);
3816 icb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3817 icb->request_q_length = cpu_to_le16(req->length);
3818 icb->response_q_length = cpu_to_le16(rsp->length);
3819 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3820 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3821 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3822 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
0107109e 3823
2d70c103 3824 /* Setup ATIO queue dma pointers for target mode */
ad950360 3825 icb->atio_q_inpointer = cpu_to_le16(0);
2d70c103
NB
3826 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3827 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
3828 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
3829
7c6300e3 3830 if (IS_SHADOW_REG_CAPABLE(ha))
ad950360 3831 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
7c6300e3 3832
f73cb695 3833 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ad950360
BVA
3834 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3835 icb->rid = cpu_to_le16(rid);
73208dfd
AC
3836 if (ha->flags.msix_enabled) {
3837 msix = &ha->msix_entries[1];
83548fe2 3838 ql_dbg(ql_dbg_init, vha, 0x0019,
7c3df132
SK
3839 "Registering vector 0x%x for base que.\n",
3840 msix->entry);
73208dfd
AC
3841 icb->msix = cpu_to_le16(msix->entry);
3842 }
3843 /* Use alternate PCI bus number */
3844 if (MSB(rid))
ad950360 3845 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
73208dfd
AC
3846 /* Use alternate PCI devfn */
3847 if (LSB(rid))
ad950360 3848 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
73208dfd 3849
3155754a 3850 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
3851 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
3852 (ha->flags.msix_enabled)) {
ad950360 3853 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3155754a 3854 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
3855 ql_dbg(ql_dbg_init, vha, 0x00fe,
3856 "MSIX Handshake Disable Mode turned on.\n");
3155754a 3857 } else {
ad950360 3858 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3155754a 3859 }
ad950360 3860 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
73208dfd
AC
3861
3862 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
3863 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
3864 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
3865 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
3866 } else {
3867 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
3868 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
3869 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
3870 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
3871 }
aa230bc5 3872 qlt_24xx_config_rings(vha);
2d70c103 3873
73208dfd
AC
3874 /* PCI posting */
3875 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
3876}
3877
1da177e4
LT
3878/**
3879 * qla2x00_init_rings() - Initializes firmware.
2db6228d 3880 * @vha: HA context
1da177e4
LT
3881 *
3882 * Beginning of request ring has initialization control block already built
3883 * by nvram config routine.
3884 *
3885 * Returns 0 on success.
3886 */
8ae6d9c7 3887int
e315cd28 3888qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
3889{
3890 int rval;
3891 unsigned long flags = 0;
29bdccbe 3892 int cnt, que;
e315cd28 3893 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
3894 struct req_que *req;
3895 struct rsp_que *rsp;
2c3dfe3f
SJ
3896 struct mid_init_cb_24xx *mid_init_cb =
3897 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
3898
3899 spin_lock_irqsave(&ha->hardware_lock, flags);
3900
3901 /* Clear outstanding commands array. */
2afa19a9 3902 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 3903 req = ha->req_q_map[que];
cb43285f 3904 if (!req || !test_bit(que, ha->req_qid_map))
29bdccbe 3905 continue;
7c6300e3
JC
3906 req->out_ptr = (void *)(req->ring + req->length);
3907 *req->out_ptr = 0;
8d93f550 3908 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 3909 req->outstanding_cmds[cnt] = NULL;
1da177e4 3910
2afa19a9 3911 req->current_outstanding_cmd = 1;
1da177e4 3912
29bdccbe
AC
3913 /* Initialize firmware. */
3914 req->ring_ptr = req->ring;
3915 req->ring_index = 0;
3916 req->cnt = req->length;
3917 }
1da177e4 3918
2afa19a9 3919 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe 3920 rsp = ha->rsp_q_map[que];
cb43285f 3921 if (!rsp || !test_bit(que, ha->rsp_qid_map))
29bdccbe 3922 continue;
7c6300e3
JC
3923 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3924 *rsp->in_ptr = 0;
29bdccbe 3925 /* Initialize response queue entries */
8ae6d9c7
GM
3926 if (IS_QLAFX00(ha))
3927 qlafx00_init_response_q_entries(rsp);
3928 else
3929 qla2x00_init_response_q_entries(rsp);
29bdccbe 3930 }
1da177e4 3931
2d70c103
NB
3932 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3933 ha->tgt.atio_ring_index = 0;
3934 /* Initialize ATIO queue entries */
3935 qlt_init_atio_q_entries(vha);
3936
e315cd28 3937 ha->isp_ops->config_rings(vha);
1da177e4
LT
3938
3939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3940
8ae6d9c7
GM
3941 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
3942
3943 if (IS_QLAFX00(ha)) {
3944 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3945 goto next_check;
3946 }
3947
1da177e4 3948 /* Update any ISP specific firmware options before initialization. */
e315cd28 3949 ha->isp_ops->update_fw_options(vha);
1da177e4 3950
605aa2bc 3951 if (ha->flags.npiv_supported) {
45980cc2 3952 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 3953 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 3954 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
3955 }
3956
24a08138 3957 if (IS_FWI2_CAPABLE(ha)) {
ad950360 3958 mid_init_cb->options = cpu_to_le16(BIT_1);
24a08138 3959 mid_init_cb->init_cb.execution_throttle =
03e8c680 3960 cpu_to_le16(ha->cur_fw_xcb_count);
40f3862b
JC
3961 ha->flags.dport_enabled =
3962 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3963 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3964 (ha->flags.dport_enabled) ? "enabled" : "disabled");
3965 /* FA-WWPN Status */
2486c627 3966 ha->flags.fawwpn_enabled =
40f3862b 3967 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
83548fe2 3968 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
2486c627 3969 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
24a08138 3970 }
2c3dfe3f 3971
e315cd28 3972 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 3973next_check:
1da177e4 3974 if (rval) {
7c3df132
SK
3975 ql_log(ql_log_fatal, vha, 0x00d2,
3976 "Init Firmware **** FAILED ****.\n");
1da177e4 3977 } else {
7c3df132
SK
3978 ql_dbg(ql_dbg_init, vha, 0x00d3,
3979 "Init Firmware -- success.\n");
4b60c827 3980 QLA_FW_STARTED(ha);
0645cb83 3981 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
1da177e4
LT
3982 }
3983
3984 return (rval);
3985}
3986
3987/**
3988 * qla2x00_fw_ready() - Waits for the firmware to become ready.
2db6228d 3989 * @vha: HA context
1da177e4
LT
3990 *
3991 * Returns 0 on success.
3992 */
3993static int
e315cd28 3994qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
3995{
3996 int rval;
4d4df193 3997 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
3998 uint16_t min_wait; /* Minimum wait time if loop is down */
3999 uint16_t wait_time; /* Wait time if loop is coming ready */
b5a340dd 4000 uint16_t state[6];
e315cd28 4001 struct qla_hw_data *ha = vha->hw;
1da177e4 4002
8ae6d9c7
GM
4003 if (IS_QLAFX00(vha->hw))
4004 return qlafx00_fw_ready(vha);
4005
1da177e4
LT
4006 rval = QLA_SUCCESS;
4007
33461491
CD
4008 /* Time to wait for loop down */
4009 if (IS_P3P_TYPE(ha))
4010 min_wait = 30;
4011 else
4012 min_wait = 20;
1da177e4
LT
4013
4014 /*
4015 * Firmware should take at most one RATOV to login, plus 5 seconds for
4016 * our own processing.
4017 */
4018 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4019 wait_time = min_wait;
4020 }
4021
4022 /* Min wait time if loop down */
4023 mtime = jiffies + (min_wait * HZ);
4024
4025 /* wait time before firmware ready */
4026 wtime = jiffies + (wait_time * HZ);
4027
4028 /* Wait for ISP to finish LIP */
e315cd28 4029 if (!vha->flags.init_done)
7c3df132
SK
4030 ql_log(ql_log_info, vha, 0x801e,
4031 "Waiting for LIP to complete.\n");
1da177e4
LT
4032
4033 do {
5b939038 4034 memset(state, -1, sizeof(state));
e315cd28 4035 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 4036 if (rval == QLA_SUCCESS) {
4d4df193 4037 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 4038 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 4039 }
4d4df193 4040 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
4041 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4042 "fw_state=%x 84xx=%x.\n", state[0],
4043 state[2]);
4d4df193
HK
4044 if ((state[2] & FSTATE_LOGGED_IN) &&
4045 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
4046 ql_dbg(ql_dbg_taskm, vha, 0x8028,
4047 "Sending verify iocb.\n");
4d4df193
HK
4048
4049 cs84xx_time = jiffies;
e315cd28 4050 rval = qla84xx_init_chip(vha);
7c3df132
SK
4051 if (rval != QLA_SUCCESS) {
4052 ql_log(ql_log_warn,
cfb0919c 4053 vha, 0x8007,
7c3df132 4054 "Init chip failed.\n");
4d4df193 4055 break;
7c3df132 4056 }
4d4df193
HK
4057
4058 /* Add time taken to initialize. */
4059 cs84xx_time = jiffies - cs84xx_time;
4060 wtime += cs84xx_time;
4061 mtime += cs84xx_time;
cfb0919c 4062 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
4063 "Increasing wait time by %ld. "
4064 "New time %ld.\n", cs84xx_time,
4065 wtime);
4d4df193
HK
4066 }
4067 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
4068 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4069 "F/W Ready - OK.\n");
1da177e4 4070
e315cd28 4071 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
4072 &ha->login_timeout, &ha->r_a_tov);
4073
4074 rval = QLA_SUCCESS;
4075 break;
4076 }
4077
4078 rval = QLA_FUNCTION_FAILED;
4079
e315cd28 4080 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 4081 state[0] != FSTATE_READY) {
1da177e4 4082 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
4083 * other than Wait for Login.
4084 */
1da177e4 4085 if (time_after_eq(jiffies, mtime)) {
7c3df132 4086 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
4087 "Cable is unplugged...\n");
4088
e315cd28 4089 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
4090 break;
4091 }
4092 }
4093 } else {
4094 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 4095 if (time_after_eq(jiffies, mtime) ||
7190575f 4096 ha->flags.isp82xx_fw_hung)
1da177e4
LT
4097 break;
4098 }
4099
4100 if (time_after_eq(jiffies, wtime))
4101 break;
4102
4103 /* Delay for a while */
4104 msleep(500);
1da177e4
LT
4105 } while (1);
4106
7c3df132 4107 ql_dbg(ql_dbg_taskm, vha, 0x803a,
b5a340dd
JC
4108 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4109 state[1], state[2], state[3], state[4], state[5], jiffies);
1da177e4 4110
cfb0919c 4111 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
4112 ql_log(ql_log_warn, vha, 0x803b,
4113 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
4114 }
4115
4116 return (rval);
4117}
4118
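/*
 * Illustrative sketch (not part of the driver): qla2x00_fw_ready() above
 * tracks two jiffies deadlines -- a short one (mtime) that only applies
 * while the loop is down, and an overall one (wtime).  Minimal form of
 * that polling pattern, with hypothetical callback names:
 */
static int example_two_deadline_poll(unsigned int min_wait_sec,
    unsigned int max_wait_sec, bool (*ready)(void), bool (*loop_down)(void))
{
	unsigned long mtime = jiffies + min_wait_sec * HZ;
	unsigned long wtime = jiffies + max_wait_sec * HZ;

	while (!time_after_eq(jiffies, wtime)) {
		if (ready())
			return 0;
		/* Give up on the short deadline if the loop is down. */
		if (loop_down() && time_after_eq(jiffies, mtime))
			return -ETIMEDOUT;
		msleep(500);
	}
	return -ETIMEDOUT;
}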
4119/*
4120* qla2x00_configure_hba
4121* Setup adapter context.
4122*
4123* Input:
4124* ha = adapter state pointer.
4125*
4126* Returns:
4127* 0 = success
4128*
4129* Context:
4130* Kernel context.
4131*/
4132static int
e315cd28 4133qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
4134{
4135 int rval;
4136 uint16_t loop_id;
4137 uint16_t topo;
2c3dfe3f 4138 uint16_t sw_cap;
1da177e4
LT
4139 uint8_t al_pa;
4140 uint8_t area;
4141 uint8_t domain;
4142 char connect_type[22];
e315cd28 4143 struct qla_hw_data *ha = vha->hw;
61e1b269 4144 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
482c9dc7 4145 port_id_t id;
9d1aa4e1 4146 unsigned long flags;
1da177e4
LT
4147
4148 /* Get host addresses. */
e315cd28 4149 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 4150 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 4151 if (rval != QLA_SUCCESS) {
e315cd28 4152 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 4153 IS_CNA_CAPABLE(ha) ||
33135aa2 4154 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
4155 ql_dbg(ql_dbg_disc, vha, 0x2008,
4156 "Loop is in a transition state.\n");
33135aa2 4157 } else {
7c3df132
SK
4158 ql_log(ql_log_warn, vha, 0x2009,
4159 "Unable to get host loop ID.\n");
61e1b269
JC
4160 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4161 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4162 ql_log(ql_log_warn, vha, 0x1151,
4163 "Doing link init.\n");
4164 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
4165 return rval;
4166 }
e315cd28 4167 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 4168 }
1da177e4
LT
4169 return (rval);
4170 }
4171
4172 if (topo == 4) {
7c3df132
SK
4173 ql_log(ql_log_info, vha, 0x200a,
4174 "Cannot get topology - retrying.\n");
1da177e4
LT
4175 return (QLA_FUNCTION_FAILED);
4176 }
4177
e315cd28 4178 vha->loop_id = loop_id;
1da177e4
LT
4179
4180 /* initialize */
4181 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4182 ha->operating_mode = LOOP;
2c3dfe3f 4183 ha->switch_cap = 0;
1da177e4
LT
4184
4185 switch (topo) {
4186 case 0:
7c3df132 4187 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
4188 ha->current_topology = ISP_CFG_NL;
4189 strcpy(connect_type, "(Loop)");
4190 break;
4191
4192 case 1:
7c3df132 4193 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 4194 ha->switch_cap = sw_cap;
1da177e4
LT
4195 ha->current_topology = ISP_CFG_FL;
4196 strcpy(connect_type, "(FL_Port)");
4197 break;
4198
4199 case 2:
7c3df132 4200 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
4201 ha->operating_mode = P2P;
4202 ha->current_topology = ISP_CFG_N;
4203 strcpy(connect_type, "(N_Port-to-N_Port)");
4204 break;
4205
4206 case 3:
7c3df132 4207 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 4208 ha->switch_cap = sw_cap;
1da177e4
LT
4209 ha->operating_mode = P2P;
4210 ha->current_topology = ISP_CFG_F;
4211 strcpy(connect_type, "(F_Port)");
4212 break;
4213
4214 default:
7c3df132
SK
4215 ql_dbg(ql_dbg_disc, vha, 0x200f,
4216 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
4217 ha->current_topology = ISP_CFG_NL;
4218 strcpy(connect_type, "(Loop)");
4219 break;
4220 }
4221
4222 /* Save Host port and loop ID. */
4223 /* byte order - Big Endian */
482c9dc7
QT
4224 id.b.domain = domain;
4225 id.b.area = area;
4226 id.b.al_pa = al_pa;
4227 id.b.rsvd_1 = 0;
9d1aa4e1 4228 spin_lock_irqsave(&ha->hardware_lock, flags);
8777e431
QT
4229 if (!(topo == 2 && ha->flags.n2n_bigger))
4230 qlt_update_host_map(vha, id);
9d1aa4e1 4231 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103 4232
e315cd28 4233 if (!vha->flags.init_done)
7c3df132
SK
4234 ql_log(ql_log_info, vha, 0x2010,
4235 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 4236 connect_type, vha->loop_id);
1da177e4 4237
1da177e4
LT
4238 return(rval);
4239}
4240
a9083016 4241inline void
e315cd28
AC
4242qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4243 char *def)
9bb9fcf2
AV
4244{
4245 char *st, *en;
4246 uint16_t index;
e315cd28 4247 struct qla_hw_data *ha = vha->hw;
ab671149 4248 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 4249 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2
AV
4250
4251 if (memcmp(model, BINZERO, len) != 0) {
4252 strncpy(ha->model_number, model, len);
4253 st = en = ha->model_number;
4254 en += len - 1;
4255 while (en > st) {
4256 if (*en != 0x20 && *en != 0x00)
4257 break;
4258 *en-- = '\0';
4259 }
4260
4261 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
4262 if (use_tbl &&
4263 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 4264 index < QLA_MODEL_NAMES)
1ee27146
JC
4265 strncpy(ha->model_desc,
4266 qla2x00_model_name[index * 2 + 1],
4267 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
4268 } else {
4269 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
4270 if (use_tbl &&
4271 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
4272 index < QLA_MODEL_NAMES) {
4273 strcpy(ha->model_number,
4274 qla2x00_model_name[index * 2]);
1ee27146
JC
4275 strncpy(ha->model_desc,
4276 qla2x00_model_name[index * 2 + 1],
4277 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
4278 } else {
4279 strcpy(ha->model_number, def);
4280 }
4281 }
1ee27146 4282 if (IS_FWI2_CAPABLE(ha))
e315cd28 4283 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 4284 sizeof(ha->model_desc));
9bb9fcf2
AV
4285}
4286
4e08df3f
DM
4287/* On sparc systems, obtain port and node WWN from firmware
4288 * properties.
4289 */
e315cd28 4290static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
4291{
4292#ifdef CONFIG_SPARC
e315cd28 4293 struct qla_hw_data *ha = vha->hw;
4e08df3f 4294 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
4295 struct device_node *dp = pci_device_to_OF_node(pdev);
4296 const u8 *val;
4e08df3f
DM
4297 int len;
4298
4299 val = of_get_property(dp, "port-wwn", &len);
4300 if (val && len >= WWN_SIZE)
4301 memcpy(nv->port_name, val, WWN_SIZE);
4302
4303 val = of_get_property(dp, "node-wwn", &len);
4304 if (val && len >= WWN_SIZE)
4305 memcpy(nv->node_name, val, WWN_SIZE);
4306#endif
4307}
4308
1da177e4
LT
4309/*
4310* NVRAM configuration for ISP 2xxx
4311*
4312* Input:
4313* ha = adapter block pointer.
4314*
4315* Output:
4316* initialization control block in response_ring
4317* host adapters parameters in host adapter block
4318*
4319* Returns:
4320* 0 = success.
4321*/
abbd8870 4322int
e315cd28 4323qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 4324{
4e08df3f 4325 int rval;
0107109e
AV
4326 uint8_t chksum = 0;
4327 uint16_t cnt;
4328 uint8_t *dptr1, *dptr2;
e315cd28 4329 struct qla_hw_data *ha = vha->hw;
0107109e 4330 init_cb_t *icb = ha->init_cb;
281afe19
SJ
4331 nvram_t *nv = ha->nvram;
4332 uint8_t *ptr = ha->nvram;
3d71644c 4333 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 4334
4e08df3f
DM
4335 rval = QLA_SUCCESS;
4336
1da177e4 4337 /* Determine NVRAM starting address. */
0107109e 4338 ha->nvram_size = sizeof(nvram_t);
1da177e4
LT
4339 ha->nvram_base = 0;
4340 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4341 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
4342 ha->nvram_base = 0x80;
4343
4344 /* Get NVRAM data and calculate checksum. */
e315cd28 4345 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
4346 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4347 chksum += *ptr++;
1da177e4 4348
7c3df132
SK
4349 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4350 "Contents of NVRAM.\n");
4351 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
4352 (uint8_t *)nv, ha->nvram_size);
1da177e4
LT
4353
4354 /* Bad NVRAM data, set defaults parameters. */
4355 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
4356 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
4357 /* Reset NVRAM data. */
7c3df132 4358 ql_log(ql_log_warn, vha, 0x0064,
9e336520 4359 "Inconsistent NVRAM "
7c3df132
SK
4360 "detected: checksum=0x%x id=%c version=0x%x.\n",
4361 chksum, nv->id[0], nv->nvram_version);
4362 ql_log(ql_log_warn, vha, 0x0065,
4363 "Falling back to "
4364 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
4365
4366 /*
4367 * Set default initialization control block.
4368 */
4369 memset(nv, 0, ha->nvram_size);
4370 nv->parameter_block_version = ICB_VERSION;
4371
4372 if (IS_QLA23XX(ha)) {
4373 nv->firmware_options[0] = BIT_2 | BIT_1;
4374 nv->firmware_options[1] = BIT_7 | BIT_5;
4375 nv->add_firmware_options[0] = BIT_5;
4376 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 4377 nv->frame_payload_size = 2048;
4e08df3f
DM
4378 nv->special_options[1] = BIT_7;
4379 } else if (IS_QLA2200(ha)) {
4380 nv->firmware_options[0] = BIT_2 | BIT_1;
4381 nv->firmware_options[1] = BIT_7 | BIT_5;
4382 nv->add_firmware_options[0] = BIT_5;
4383 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 4384 nv->frame_payload_size = 1024;
4e08df3f
DM
4385 } else if (IS_QLA2100(ha)) {
4386 nv->firmware_options[0] = BIT_3 | BIT_1;
4387 nv->firmware_options[1] = BIT_5;
98aee70d 4388 nv->frame_payload_size = 1024;
4e08df3f
DM
4389 }
4390
ad950360
BVA
4391 nv->max_iocb_allocation = cpu_to_le16(256);
4392 nv->execution_throttle = cpu_to_le16(16);
4e08df3f
DM
4393 nv->retry_count = 8;
4394 nv->retry_delay = 1;
4395
4396 nv->port_name[0] = 33;
4397 nv->port_name[3] = 224;
4398 nv->port_name[4] = 139;
4399
e315cd28 4400 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
4401
4402 nv->login_timeout = 4;
4403
4404 /*
4405 * Set default host adapter parameters
4406 */
4407 nv->host_p[1] = BIT_2;
4408 nv->reset_delay = 5;
4409 nv->port_down_retry_count = 8;
ad950360 4410 nv->max_luns_per_target = cpu_to_le16(8);
4e08df3f
DM
4411 nv->link_down_timeout = 60;
4412
4413 rval = 1;
1da177e4
LT
4414 }
4415
4416#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
4417 /*
4418 * The SN2 does not provide BIOS emulation which means you can't change
4419 * potentially bogus BIOS settings. Force the use of default settings
4420 * for link rate and frame size. Hope that the rest of the settings
4421 * are valid.
4422 */
4423 if (ia64_platform_is("sn2")) {
98aee70d 4424 nv->frame_payload_size = 2048;
1da177e4
LT
4425 if (IS_QLA23XX(ha))
4426 nv->special_options[1] = BIT_7;
4427 }
4428#endif
4429
4430 /* Reset Initialization control block */
0107109e 4431 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
4432
4433 /*
4434 * Setup driver NVRAM options.
4435 */
4436 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4437 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4438 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4439 nv->firmware_options[1] &= ~BIT_4;
4440
4441 if (IS_QLA23XX(ha)) {
4442 nv->firmware_options[0] |= BIT_2;
4443 nv->firmware_options[0] &= ~BIT_3;
2d70c103 4444 nv->special_options[0] &= ~BIT_6;
0107109e 4445 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
4446
4447 if (IS_QLA2300(ha)) {
4448 if (ha->fb_rev == FPM_2310) {
4449 strcpy(ha->model_number, "QLA2310");
4450 } else {
4451 strcpy(ha->model_number, "QLA2300");
4452 }
4453 } else {
e315cd28 4454 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 4455 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
4456 }
4457 } else if (IS_QLA2200(ha)) {
4458 nv->firmware_options[0] |= BIT_2;
4459 /*
4460 * 'Point-to-point preferred, else loop' is not a safe
4461 * connection mode setting.
4462 */
4463 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
4464 (BIT_5 | BIT_4)) {
4465 /* Force 'loop preferred, else point-to-point'. */
4466 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
4467 nv->add_firmware_options[0] |= BIT_5;
4468 }
4469 strcpy(ha->model_number, "QLA22xx");
4470 } else /*if (IS_QLA2100(ha))*/ {
4471 strcpy(ha->model_number, "QLA2100");
4472 }
4473
4474 /*
4475 * Copy over NVRAM RISC parameter block to initialization control block.
4476 */
4477 dptr1 = (uint8_t *)icb;
4478 dptr2 = (uint8_t *)&nv->parameter_block_version;
4479 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
4480 while (cnt--)
4481 *dptr1++ = *dptr2++;
4482
4483 /* Copy 2nd half. */
4484 dptr1 = (uint8_t *)icb->add_firmware_options;
4485 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
4486 while (cnt--)
4487 *dptr1++ = *dptr2++;
0eaaca4c 4488 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5341e868
AV
4489 /* Use alternate WWN? */
4490 if (nv->host_p[1] & BIT_7) {
4491 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4492 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4493 }
4494
1da177e4
LT
4495 /* Prepare nodename */
4496 if ((icb->firmware_options[1] & BIT_6) == 0) {
4497 /*
4498 * Firmware will apply the following mask if the nodename was
4499 * not provided.
4500 */
4501 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4502 icb->node_name[0] &= 0xF0;
4503 }
4504
4505 /*
4506 * Set host adapter parameters.
4507 */
3ce8866c
SK
4508
4509 /*
4510 * BIT_7 in the host-parameters section allows for modification to
4511 * internal driver logging.
4512 */
0181944f 4513 if (nv->host_p[0] & BIT_7)
cfb0919c 4514 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
4515 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
4516 /* Always load RISC code on non ISP2[12]00 chips. */
4517 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
4518 ha->flags.disable_risc_code_load = 0;
4519 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
4520 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
4521 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 4522 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 4523 ha->flags.disable_serdes = 0;
1da177e4
LT
4524
4525 ha->operating_mode =
4526 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
4527
4528 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
4529 sizeof(ha->fw_seriallink_options));
4530
4531 /* save HBA serial number */
4532 ha->serial0 = icb->port_name[5];
4533 ha->serial1 = icb->port_name[6];
4534 ha->serial2 = icb->port_name[7];
e315cd28
AC
4535 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4536 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4 4537
ad950360 4538 icb->execution_throttle = cpu_to_le16(0xFFFF);
1da177e4
LT
4539
4540 ha->retry_count = nv->retry_count;
4541
4542 /* Set minimum login_timeout to 4 seconds. */
5b91490e 4543 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
4544 nv->login_timeout = ql2xlogintimeout;
4545 if (nv->login_timeout < 4)
4546 nv->login_timeout = 4;
4547 ha->login_timeout = nv->login_timeout;
1da177e4 4548
00a537b8
AV
4549 /* Set minimum RATOV to 100 tenths of a second. */
4550 ha->r_a_tov = 100;
1da177e4 4551
1da177e4
LT
4552 ha->loop_reset_delay = nv->reset_delay;
4553
1da177e4
LT
4554 /* Link Down Timeout = 0:
4555 *
4556 * When Port Down timer expires we will start returning
4557 * I/Os to OS with "DID_NO_CONNECT".
4558 *
4559 * Link Down Timeout != 0:
4560 *
4561 * The driver waits for the link to come up after link down
4562 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 4563 */
1da177e4
LT
4564 if (nv->link_down_timeout == 0) {
4565 ha->loop_down_abort_time =
354d6b21 4566 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
4567 } else {
4568 ha->link_down_timeout = nv->link_down_timeout;
4569 ha->loop_down_abort_time =
4570 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 4571 }
1da177e4 4572
1da177e4
LT
4573 /*
4574 * Need enough time to try and get the port back.
4575 */
4576 ha->port_down_retry_count = nv->port_down_retry_count;
4577 if (qlport_down_retry)
4578 ha->port_down_retry_count = qlport_down_retry;
4579 /* Set login_retry_count */
4580 ha->login_retry_count = nv->retry_count;
4581 if (ha->port_down_retry_count == nv->port_down_retry_count &&
4582 ha->port_down_retry_count > 3)
4583 ha->login_retry_count = ha->port_down_retry_count;
4584 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4585 ha->login_retry_count = ha->port_down_retry_count;
4586 if (ql2xloginretrycount)
4587 ha->login_retry_count = ql2xloginretrycount;
4588
ad950360 4589 icb->lun_enables = cpu_to_le16(0);
1da177e4
LT
4590 icb->command_resource_count = 0;
4591 icb->immediate_notify_resource_count = 0;
ad950360 4592 icb->timeout = cpu_to_le16(0);
1da177e4
LT
4593
4594 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4595 /* Enable RIO */
4596 icb->firmware_options[0] &= ~BIT_3;
4597 icb->add_firmware_options[0] &=
4598 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4599 icb->add_firmware_options[0] |= BIT_2;
4600 icb->response_accumulation_timer = 3;
4601 icb->interrupt_delay_timer = 5;
4602
e315cd28 4603 vha->flags.process_response_queue = 1;
1da177e4 4604 } else {
4fdfefe5 4605 /* Enable ZIO. */
e315cd28 4606 if (!vha->flags.init_done) {
4fdfefe5
AV
4607 ha->zio_mode = icb->add_firmware_options[0] &
4608 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4609 ha->zio_timer = icb->interrupt_delay_timer ?
4610 icb->interrupt_delay_timer: 2;
4611 }
1da177e4
LT
4612 icb->add_firmware_options[0] &=
4613 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 4614 vha->flags.process_response_queue = 0;
4fdfefe5 4615 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 4616 ha->zio_mode = QLA_ZIO_MODE_6;
4617
7c3df132 4618 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
4619 "ZIO mode %d enabled; timer delay (%d us).\n",
4620 ha->zio_mode, ha->zio_timer * 100);
1da177e4 4621
4fdfefe5
AV
4622 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
4623 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 4624 vha->flags.process_response_queue = 1;
1da177e4
LT
4625 }
4626 }
4627
4e08df3f 4628 if (rval) {
7c3df132
SK
4629 ql_log(ql_log_warn, vha, 0x0069,
4630 "NVRAM configuration failed.\n");
4e08df3f
DM
4631 }
4632 return (rval);
1da177e4
LT
4633}
4634
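/*
 * Illustrative sketch (not part of the driver): the NVRAM validation at
 * the top of qla2x00_nvram_config() uses an additive checksum -- the
 * byte-wise sum of the entire image, including the stored checksum byte,
 * must be zero.  Hypothetical helper:
 */
static inline bool example_nvram_csum_ok(const uint8_t *nv, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *nv++;
	return sum == 0;
}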
19a7b4ae
JSEC
4635static void
4636qla2x00_rport_del(void *data)
4637{
4638 fc_port_t *fcport = data;
d97994dc 4639 struct fc_rport *rport;
044d78e1 4640 unsigned long flags;
d97994dc 4641
044d78e1 4642 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
ac280b67 4643 rport = fcport->drport ? fcport->drport: fcport->rport;
d97994dc 4644 fcport->drport = NULL;
044d78e1 4645 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
726b8548 4646 if (rport) {
83548fe2
QT
4647 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4648 "%s %8phN. rport %p roles %x\n",
4649 __func__, fcport->port_name, rport,
4650 rport->roles);
726b8548 4651
d97994dc 4652 fc_remote_port_delete(rport);
726b8548 4653 }
19a7b4ae
JSEC
4654}
4655
1da177e4
LT
4656/**
4657 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2db6228d 4658 * @vha: HA context
1da177e4
LT
4659 * @flags: allocation flags
4660 *
4661 * Returns a pointer to the allocated fcport, or NULL, if none available.
4662 */
9a069e19 4663fc_port_t *
e315cd28 4664qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
4665{
4666 fc_port_t *fcport;
4667
bbfbbbc1
MK
4668 fcport = kzalloc(sizeof(fc_port_t), flags);
4669 if (!fcport)
4670 return NULL;
1da177e4 4671
9ecd6564
QT
4672 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4673 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4674 flags);
4675 if (!fcport->ct_desc.ct_sns) {
4676 ql_log(ql_log_warn, vha, 0xd049,
4677 "Failed to allocate ct_sns request.\n");
4678 kfree(fcport);
4679 return NULL;
4680 }
4681
1da177e4 4682 /* Setup fcport template structure. */
e315cd28 4683 fcport->vha = vha;
1da177e4
LT
4684 fcport->port_type = FCT_UNKNOWN;
4685 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 4686 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 4687 fcport->supported_classes = FC_COS_UNSPECIFIED;
f635e48e 4688 fcport->fp_speed = PORT_SPEED_UNKNOWN;
1da177e4 4689
726b8548
QT
4690 fcport->disc_state = DSC_DELETED;
4691 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4692 fcport->deleted = QLA_SESS_DELETED;
4693 fcport->login_retry = vha->hw->login_retry_count;
9ecd6564 4694 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
726b8548
QT
4695 fcport->logout_on_delete = 1;
4696
4697 if (!fcport->ct_desc.ct_sns) {
83548fe2 4698 ql_log(ql_log_warn, vha, 0xd049,
726b8548
QT
4699 "Failed to allocate ct_sns request.\n");
4700 kfree(fcport);
4701 fcport = NULL;
4702 }
9ecd6564 4703
726b8548 4704 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
cd4ed6b4 4705 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
726b8548
QT
4706 INIT_LIST_HEAD(&fcport->gnl_entry);
4707 INIT_LIST_HEAD(&fcport->list);
4708
bbfbbbc1 4709 return fcport;
1da177e4
LT
4710}
4711
726b8548
QT
4712void
4713qla2x00_free_fcport(fc_port_t *fcport)
4714{
4715 if (fcport->ct_desc.ct_sns) {
4716 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4717 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4718 fcport->ct_desc.ct_sns_dma);
4719
4720 fcport->ct_desc.ct_sns = NULL;
4721 }
4722 kfree(fcport);
4723}
4724
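/*
 * Illustrative usage sketch (not part of the driver): a temporary fcport
 * from qla2x00_alloc_fcport() carries a DMA-coherent ct_sns buffer, and
 * qla2x00_free_fcport() above is its release counterpart.  Hypothetical
 * caller:
 */
static int example_with_temp_fcport(scsi_qla_host_t *vha)
{
	fc_port_t *tmp = qla2x00_alloc_fcport(vha, GFP_KERNEL);

	if (!tmp)
		return QLA_MEMORY_ALLOC_FAILED;
	/* ... use tmp as a scratch port entry while probing the loop ... */
	qla2x00_free_fcport(tmp);
	return QLA_SUCCESS;
}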
1da177e4
LT
4725/*
4726 * qla2x00_configure_loop
4727 * Updates Fibre Channel Device Database with what is actually on loop.
4728 *
4729 * Input:
4730 * ha = adapter block pointer.
4731 *
4732 * Returns:
4733 * 0 = success.
4734 * 1 = error.
4735 * 2 = database was full and device was not configured.
4736 */
4737static int
e315cd28 4738qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
4739{
4740 int rval;
4741 unsigned long flags, save_flags;
e315cd28 4742 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
4743 rval = QLA_SUCCESS;
4744
4745 /* Get Initiator ID */
e315cd28
AC
4746 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
4747 rval = qla2x00_configure_hba(vha);
1da177e4 4748 if (rval != QLA_SUCCESS) {
7c3df132
SK
4749 ql_dbg(ql_dbg_disc, vha, 0x2013,
4750 "Unable to configure HBA.\n");
1da177e4
LT
4751 return (rval);
4752 }
4753 }
4754
e315cd28 4755 save_flags = flags = vha->dpc_flags;
7c3df132
SK
4756 ql_dbg(ql_dbg_disc, vha, 0x2014,
4757 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
4758
4759 /*
4760 * If we have both an RSCN and PORT UPDATE pending then handle them
4761 * both at the same time.
4762 */
e315cd28
AC
4763 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4764 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 4765
3064ff39
MH
4766 qla2x00_get_data_rate(vha);
4767
1da177e4
LT
4768 /* Determine what we need to do */
4769 if (ha->current_topology == ISP_CFG_FL &&
4770 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4771
1da177e4
LT
4772 set_bit(RSCN_UPDATE, &flags);
4773
4774 } else if (ha->current_topology == ISP_CFG_F &&
4775 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4776
1da177e4
LT
4777 set_bit(RSCN_UPDATE, &flags);
4778 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
4779
4780 } else if (ha->current_topology == ISP_CFG_N) {
4781 clear_bit(RSCN_UPDATE, &flags);
48acad09
QT
4782 if (qla_tgt_mode_enabled(vha)) {
4783 /* allow the other side to start the login */
9cd883f0
QT
4784 clear_bit(LOCAL_LOOP_UPDATE, &flags);
4785 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
9cd883f0 4786 }
41dc529a
QT
4787 } else if (ha->current_topology == ISP_CFG_NL) {
4788 clear_bit(RSCN_UPDATE, &flags);
4789 set_bit(LOCAL_LOOP_UPDATE, &flags);
e315cd28 4790 } else if (!vha->flags.online ||
1da177e4 4791 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1da177e4
LT
4792 set_bit(RSCN_UPDATE, &flags);
4793 set_bit(LOCAL_LOOP_UPDATE, &flags);
4794 }
4795
4796 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
4797 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
4798 ql_dbg(ql_dbg_disc, vha, 0x2015,
4799 "Loop resync needed, failing.\n");
1da177e4 4800 rval = QLA_FUNCTION_FAILED;
642ef983 4801 } else
e315cd28 4802 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
4803 }
4804
4805 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132 4806 if (LOOP_TRANSITION(vha)) {
83548fe2 4807 ql_dbg(ql_dbg_disc, vha, 0x2099,
7c3df132 4808 "Needs RSCN update and loop transition.\n");
1da177e4 4809 rval = QLA_FUNCTION_FAILED;
7c3df132 4810 }
e315cd28
AC
4811 else
4812 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
4813 }
4814
4815 if (rval == QLA_SUCCESS) {
e315cd28
AC
4816 if (atomic_read(&vha->loop_down_timer) ||
4817 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
4818 rval = QLA_FUNCTION_FAILED;
4819 } else {
e315cd28 4820 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
4821 ql_dbg(ql_dbg_disc, vha, 0x2069,
4822 "LOOP READY.\n");
ec7193e2 4823 ha->flags.fw_init_done = 1;
3bb67df5
DKU
4824
4825 /*
4826 * Process any ATIO queue entries that came in
4827 * while we weren't online.
4828 */
ead03855
QT
4829 if (qla_tgt_mode_enabled(vha) ||
4830 qla_dual_mode_enabled(vha)) {
1073daa4
QT
4831 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4832 qlt_24xx_process_atio_queue(vha, 0);
4833 spin_unlock_irqrestore(&ha->tgt.atio_lock,
4834 flags);
3bb67df5 4835 }
1da177e4
LT
4836 }
4837 }
4838
4839 if (rval) {
7c3df132
SK
4840 ql_dbg(ql_dbg_disc, vha, 0x206a,
4841 "%s *** FAILED ***.\n", __func__);
1da177e4 4842 } else {
7c3df132
SK
4843 ql_dbg(ql_dbg_disc, vha, 0x206b,
4844 "%s: exiting normally.\n", __func__);
1da177e4
LT
4845 }
4846
cc3ef7bc 4847 /* Restore state if a resync event occurred during processing */
e315cd28 4848 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 4849 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 4850 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 4851 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 4852 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 4853 }
1da177e4
LT
4854 }
4855
4856 return (rval);
4857}
4858
1da177e4
LT
4859/*
4860 * qla2x00_configure_local_loop
4861 * Updates Fibre Channel Device Database with local loop devices.
4862 *
4863 * Input:
4864 * ha = adapter block pointer.
4865 *
4866 * Returns:
4867 * 0 = success.
4868 */
4869static int
e315cd28 4870qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
4871{
4872 int rval, rval2;
4873 int found_devs;
4874 int found;
4875 fc_port_t *fcport, *new_fcport;
4876
4877 uint16_t index;
4878 uint16_t entries;
4879 char *id_iter;
4880 uint16_t loop_id;
4881 uint8_t domain, area, al_pa;
e315cd28 4882 struct qla_hw_data *ha = vha->hw;
41dc529a 4883 unsigned long flags;
1da177e4 4884
8777e431
QT
4885 /* Initiate N2N login. */
4886 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
4887 /* Borrow init_cb as scratch space for the port login (PLOGI) template. */
4888 u32 *bp, i, sz;
4889
4890 memset(ha->init_cb, 0, ha->init_cb_size);
4891 sz = min_t(int, sizeof(struct els_plogi_payload),
4892 ha->init_cb_size);
4893 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
4894 (void *)ha->init_cb, sz);
4895 if (rval == QLA_SUCCESS) {
4896 bp = (uint32_t *)ha->init_cb;
4897 for (i = 0; i < sz/4 ; i++, bp++)
4898 *bp = cpu_to_be32(*bp);
4899
4900 memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
4901 sizeof(ha->plogi_els_payld.data));
4902 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4903 } else {
4904 ql_dbg(ql_dbg_init, vha, 0x00d1,
4905 "PLOGI ELS param read fail.\n");
4906 }
4907 return QLA_SUCCESS;
4908 }
4909
1da177e4
LT
4910 found_devs = 0;
4911 new_fcport = NULL;
642ef983 4912 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 4913
1da177e4 4914 /* Get list of logged in devices. */
642ef983 4915 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 4916 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
4917 &entries);
4918 if (rval != QLA_SUCCESS)
4919 goto cleanup_allocation;
4920
83548fe2 4921 ql_dbg(ql_dbg_disc, vha, 0x2011,
7c3df132
SK
4922 "Entries in ID list (%d).\n", entries);
4923 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
4924 (uint8_t *)ha->gid_list,
4925 entries * sizeof(struct gid_list_info));
1da177e4 4926
0e324e94
QT
4927 if (entries == 0) {
4928 spin_lock_irqsave(&vha->work_lock, flags);
4929 vha->scan.scan_retry++;
4930 spin_unlock_irqrestore(&vha->work_lock, flags);
4931
4932 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
4933 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4934 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4935 }
4936 } else {
4937 vha->scan.scan_retry = 0;
4938 }
4939
9cd883f0
QT
4940 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4941 fcport->scan_state = QLA_FCPORT_SCAN;
4942 }
4943
1da177e4 4944 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4945 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4946 if (new_fcport == NULL) {
83548fe2 4947 ql_log(ql_log_warn, vha, 0x2012,
7c3df132 4948 "Memory allocation failed for fcport.\n");
1da177e4
LT
4949 rval = QLA_MEMORY_ALLOC_FAILED;
4950 goto cleanup_allocation;
4951 }
4952 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4953
1da177e4
LT
4954 /* Add devices to port list. */
4955 id_iter = (char *)ha->gid_list;
4956 for (index = 0; index < entries; index++) {
4957 domain = ((struct gid_list_info *)id_iter)->domain;
4958 area = ((struct gid_list_info *)id_iter)->area;
4959 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 4960 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
4961 loop_id = (uint16_t)
4962 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 4963 else
1da177e4
LT
4964 loop_id = le16_to_cpu(
4965 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 4966 id_iter += ha->gid_list_info_size;
1da177e4
LT
4967
4968 /* Bypass reserved domain fields. */
4969 if ((domain & 0xf0) == 0xf0)
4970 continue;
4971
1da177e4
LT
4972 /* Bypass invalid local loop ID. */
4973 if (loop_id > LAST_LOCAL_LOOP_ID)
4974 continue;
4975
41dc529a 4976 memset(new_fcport->port_name, 0, WWN_SIZE);
370d550e 4977
1da177e4
LT
4978 /* Fill in member data. */
4979 new_fcport->d_id.b.domain = domain;
4980 new_fcport->d_id.b.area = area;
4981 new_fcport->d_id.b.al_pa = al_pa;
4982 new_fcport->loop_id = loop_id;
9cd883f0 4983 new_fcport->scan_state = QLA_FCPORT_FOUND;
41dc529a 4984
e315cd28 4985 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 4986 if (rval2 != QLA_SUCCESS) {
83548fe2 4987 ql_dbg(ql_dbg_disc, vha, 0x2097,
7c3df132
SK
4988 "Failed to retrieve fcport information "
4989 "-- get_port_database=%x, loop_id=0x%04x.\n",
4990 rval2, new_fcport->loop_id);
edd05de1
DG
4991 /* Skip retry if N2N */
4992 if (ha->current_topology != ISP_CFG_N) {
4993 ql_dbg(ql_dbg_disc, vha, 0x2105,
4994 "Scheduling resync.\n");
4995 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4996 continue;
4997 }
1da177e4
LT
4998 }
4999
41dc529a 5000 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
5001 /* Check for matching device in port list. */
5002 found = 0;
5003 fcport = NULL;
e315cd28 5004 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
5005 if (memcmp(new_fcport->port_name, fcport->port_name,
5006 WWN_SIZE))
5007 continue;
5008
ddb9b126 5009 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
5010 fcport->loop_id = new_fcport->loop_id;
5011 fcport->port_type = new_fcport->port_type;
5012 fcport->d_id.b24 = new_fcport->d_id.b24;
5013 memcpy(fcport->node_name, new_fcport->node_name,
5014 WWN_SIZE);
9cd883f0 5015 fcport->scan_state = QLA_FCPORT_FOUND;
1da177e4
LT
5016 found++;
5017 break;
5018 }
5019
5020 if (!found) {
5021 /* New device, add to fcports list. */
e315cd28 5022 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
5023
5024 /* Allocate a new replacement fcport. */
5025 fcport = new_fcport;
41dc529a
QT
5026
5027 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5028
e315cd28 5029 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
41dc529a 5030
1da177e4 5031 if (new_fcport == NULL) {
83548fe2 5032 ql_log(ql_log_warn, vha, 0xd031,
7c3df132 5033 "Failed to allocate memory for fcport.\n");
1da177e4
LT
5034 rval = QLA_MEMORY_ALLOC_FAILED;
5035 goto cleanup_allocation;
5036 }
41dc529a 5037 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
5038 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5039 }
5040
41dc529a
QT
5041 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5042
d8b45213 5043 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 5044 fcport->fp_speed = ha->link_data_rate;
d8b45213 5045
1da177e4
LT
5046 found_devs++;
5047 }
5048
9cd883f0
QT
5049 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5050 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5051 break;
5052
5053 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5054 if ((qla_dual_mode_enabled(vha) ||
5055 qla_ini_mode_enabled(vha)) &&
5056 atomic_read(&fcport->state) == FCS_ONLINE) {
5057 qla2x00_mark_device_lost(vha, fcport,
5058 ql2xplogiabsentdevice, 0);
5059 if (fcport->loop_id != FC_NO_LOOP_ID &&
5060 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5061 fcport->port_type != FCT_INITIATOR &&
5062 fcport->port_type != FCT_BROADCAST) {
5063 ql_dbg(ql_dbg_disc, vha, 0x20f0,
5064 "%s %d %8phC post del sess\n",
5065 __func__, __LINE__,
5066 fcport->port_name);
5067
d8630bb9 5068 qlt_schedule_sess_for_deletion(fcport);
9cd883f0
QT
5069 continue;
5070 }
5071 }
5072 }
5073
5074 if (fcport->scan_state == QLA_FCPORT_FOUND)
5075 qla24xx_fcport_handle_login(vha, fcport);
5076 }
5077
1da177e4 5078cleanup_allocation:
c9475cb0 5079 kfree(new_fcport);
1da177e4
LT
5080
5081 if (rval != QLA_SUCCESS) {
83548fe2 5082 ql_dbg(ql_dbg_disc, vha, 0x2098,
7c3df132 5083 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
5084 }
5085
1da177e4
LT
5086 return (rval);
5087}
5088
d8b45213 5089static void
e315cd28 5090qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 5091{
d8b45213 5092 int rval;
93f2bd67 5093 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 5094 struct qla_hw_data *ha = vha->hw;
d8b45213 5095
c76f2c01 5096 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
5097 return;
5098
c9afb9a2
GM
5099 if (atomic_read(&fcport->state) != FCS_ONLINE)
5100 return;
5101
39bd9622 5102 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
413c2f33
HM
5103 fcport->fp_speed > ha->link_data_rate ||
5104 !ha->flags.gpsc_supported)
d8b45213
AV
5105 return;
5106
e315cd28 5107 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 5108 mb);
d8b45213 5109 if (rval != QLA_SUCCESS) {
7c3df132 5110 ql_dbg(ql_dbg_disc, vha, 0x2004,
7b833558
OK
5111 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5112 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
d8b45213 5113 } else {
7c3df132 5114 ql_dbg(ql_dbg_disc, vha, 0x2005,
33b28357 5115 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
d0297c9a 5116 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
33b28357 5117 fcport->fp_speed, fcport->port_name);
d8b45213
AV
5118 }
5119}
5120
cc28e0ac
QT
5121void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5122{
5123 qla2x00_iidma_fcport(vha, fcport);
5124 qla24xx_update_fcport_fcp_prio(vha, fcport);
5125}
5126
5127int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5128{
5129 struct qla_work_evt *e;
5130
5131 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
5132 if (!e)
5133 return QLA_FUNCTION_FAILED;
5134
5135 e->u.fcport.fcport = fcport;
5136 return qla2x00_post_work(vha, e);
5137}
5138
726b8548 5139/* qla2x00_reg_remote_port is reserved for Initiator Mode only. */
23be331d 5140static void
e315cd28 5141qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118 5142{
5143 struct fc_rport_identifiers rport_ids;
bdf79621 5144 struct fc_rport *rport;
044d78e1 5145 unsigned long flags;
8482e118 5146
b63d8b89
QT
5147 if (atomic_read(&fcport->state) == FCS_ONLINE)
5148 return;
5149
f8b02a85
AV
5150 rport_ids.node_name = wwn_to_u64(fcport->node_name);
5151 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118 5152 rport_ids.port_id = fcport->d_id.b.domain << 16 |
5153 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 5154 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 5155 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 5156 if (!rport) {
7c3df132
SK
5157 ql_log(ql_log_warn, vha, 0x2006,
5158 "Unable to allocate fc remote port.\n");
77d74143
AV
5159 return;
5160 }
2d70c103 5161
044d78e1 5162 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 5163 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 5164 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 5165
ad3e0eda 5166 rport->supported_classes = fcport->supported_classes;
77d74143 5167
8482e118 5168 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
5169 if (fcport->port_type == FCT_INITIATOR)
5170 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
5171 if (fcport->port_type == FCT_TARGET)
5172 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
726b8548 5173
83548fe2
QT
5174 ql_dbg(ql_dbg_disc, vha, 0x20ee,
5175 "%s %8phN. rport %p is %s mode\n",
5176 __func__, fcport->port_name, rport,
5177 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
726b8548 5178
77d74143 5179 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
5180}
5181
23be331d
AB
5182/*
5183 * qla2x00_update_fcport
5184 * Updates device on list.
5185 *
5186 * Input:
5187 * ha = adapter block pointer.
5188 * fcport = port structure pointer.
5189 *
5190 * Return:
5191 * 0 - Success
5192 * BIT_0 - error
5193 *
5194 * Context:
5195 * Kernel context.
5196 */
5197void
e315cd28 5198qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 5199{
726b8548
QT
5200 if (IS_SW_RESV_ADDR(fcport->d_id))
5201 return;
5202
cd4ed6b4
QT
5203 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
5204 __func__, fcport->port_name);
5205
5206 fcport->disc_state = DSC_UPD_FCPORT;
5207 fcport->login_retry = vha->hw->login_retry_count;
b63d8b89 5208 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
b63d8b89
QT
5209 fcport->deleted = 0;
5210 fcport->logout_on_delete = 1;
5211 fcport->login_retry = vha->hw->login_retry_count;
8777e431 5212 fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
23be331d 5213
8777e431
QT
5214 switch (vha->hw->current_topology) {
5215 case ISP_CFG_N:
5216 case ISP_CFG_NL:
5217 fcport->keep_nport_handle = 1;
5218 break;
5219 default:
5220 break;
5221 }
5222
aecf0434
QT
5223 qla2x00_iidma_fcport(vha, fcport);
5224
e84067d7
DG
5225 if (fcport->fc4f_nvme) {
5226 qla_nvme_register_remote(vha, fcport);
b63d8b89
QT
5227 fcport->disc_state = DSC_LOGIN_COMPLETE;
5228 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e84067d7
DG
5229 return;
5230 }
5231
21090cbe 5232 qla24xx_update_fcport_fcp_prio(vha, fcport);
d20ed91b 5233
726b8548
QT
5234 switch (vha->host->active_mode) {
5235 case MODE_INITIATOR:
d20ed91b 5236 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
5237 break;
5238 case MODE_TARGET:
5239 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5240 !vha->vha_tgt.qla_tgt->tgt_stopped)
5241 qlt_fc_port_added(vha, fcport);
5242 break;
5243 case MODE_DUAL:
d20ed91b 5244 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
5245 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5246 !vha->vha_tgt.qla_tgt->tgt_stopped)
5247 qlt_fc_port_added(vha, fcport);
5248 break;
5249 default:
5250 break;
d20ed91b 5251 }
cc28e0ac 5252
aecf0434
QT
5253 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
5254
cc28e0ac
QT
5255 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
5256 if (fcport->id_changed) {
5257 fcport->id_changed = 0;
5258 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5259 "%s %d %8phC post gfpnid fcp_cnt %d\n",
5260 __func__, __LINE__, fcport->port_name,
5261 vha->fcport_count);
5262 qla24xx_post_gfpnid_work(vha, fcport);
5263 } else {
5264 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5265 "%s %d %8phC post gpsc fcp_cnt %d\n",
5266 __func__, __LINE__, fcport->port_name,
5267 vha->fcport_count);
5268 qla24xx_post_gpsc_work(vha, fcport);
5269 }
5270 }
cd4ed6b4
QT
5271
5272 fcport->disc_state = DSC_LOGIN_COMPLETE;
5273}
5274
5275void qla_register_fcport_fn(struct work_struct *work)
5276{
5277 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5278 u32 rscn_gen = fcport->rscn_gen;
5279 u16 data[2];
5280
5281 if (IS_SW_RESV_ADDR(fcport->d_id))
5282 return;
5283
5284 qla2x00_update_fcport(fcport->vha, fcport);
5285
5286 if (rscn_gen != fcport->rscn_gen) {
5287 /* RSCN(s) came in while registration */
5288 switch (fcport->next_disc_state) {
5289 case DSC_DELETE_PEND:
5290 qlt_schedule_sess_for_deletion(fcport);
5291 break;
5292 case DSC_ADISC:
5293 data[0] = data[1] = 0;
5294 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5295 data);
5296 break;
5297 default:
5298 break;
5299 }
5300 }
23be331d
AB
5301}
5302
1da177e4
LT
5303/*
5304 * qla2x00_configure_fabric
5305 * Setup SNS devices with loop ID's.
5306 *
5307 * Input:
5308 * ha = adapter block pointer.
5309 *
5310 * Returns:
5311 * 0 = success.
5312 * BIT_0 = error
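 *
 * Overview (added annotation): checks that an F-port/SNS is reachable,
 * logs into the management server and SNS, registers FC-4 types and
 * names (RFT_ID/RFF_ID/RNN_ID/RSNN_NN), then starts either the async
 * GPN_FT scan or the legacy qla2x00_find_all_fabric_devs() walk.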
5313 */
5314static int
e315cd28 5315qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 5316{
b3b02e6e 5317 int rval;
726b8548 5318 fc_port_t *fcport;
1da177e4 5319 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 5320 uint16_t loop_id;
1da177e4 5321 LIST_HEAD(new_fcports);
e315cd28 5322 struct qla_hw_data *ha = vha->hw;
df673274 5323 int discovery_gen;
1da177e4
LT
5324
5325 /* If FL port exists, then SNS is present */
e428924c 5326 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
5327 loop_id = NPH_F_PORT;
5328 else
5329 loop_id = SNS_FL_PORT;
e315cd28 5330 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 5331 if (rval != QLA_SUCCESS) {
83548fe2 5332 ql_dbg(ql_dbg_disc, vha, 0x20a0,
7c3df132 5333 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 5334
e315cd28 5335 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
5336 return (QLA_SUCCESS);
5337 }
e315cd28 5338 vha->device_flags |= SWITCH_FOUND;
1da177e4 5339
41dc529a
QT
5340
5341 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
5342 rval = qla2x00_send_change_request(vha, 0x3, 0);
5343 if (rval != QLA_SUCCESS)
5344 ql_log(ql_log_warn, vha, 0x121,
5345 "Failed to enable receiving of RSCN requests: 0x%x.\n",
5346 rval);
5347 }
5348
5349
1da177e4 5350 do {
726b8548
QT
5351 qla2x00_mgmt_svr_login(vha);
5352
cca5335c
AV
5353 /* FDMI support. */
5354 if (ql2xfdmienable &&
e315cd28
AC
5355 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
5356 qla2x00_fdmi_register(vha);
cca5335c 5357
1da177e4 5358 /* Ensure we are logged into the SNS. */
a14c7711 5359 loop_id = NPH_SNS_LID(ha);
0b91d116
CD
5360 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
5361 0xfc, mb, BIT_1|BIT_0);
a14c7711
JC
5362 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5363 ql_dbg(ql_dbg_disc, vha, 0x20a1,
5364 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
5365 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
0b91d116 5366 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 5367 return rval;
0b91d116 5368 }
e315cd28
AC
5369 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
5370 if (qla2x00_rft_id(vha)) {
1da177e4 5371 /* EMPTY */
83548fe2 5372 ql_dbg(ql_dbg_disc, vha, 0x20a2,
7c3df132 5373 "Register FC-4 TYPE failed.\n");
b98ae0d7
QT
5374 if (test_bit(LOOP_RESYNC_NEEDED,
5375 &vha->dpc_flags))
5376 break;
1da177e4 5377 }
d3bae931 5378 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
1da177e4 5379 /* EMPTY */
83548fe2 5380 ql_dbg(ql_dbg_disc, vha, 0x209a,
7c3df132 5381 "Register FC-4 Features failed.\n");
b98ae0d7
QT
5382 if (test_bit(LOOP_RESYNC_NEEDED,
5383 &vha->dpc_flags))
5384 break;
1da177e4 5385 }
d3bae931
DG
5386 if (vha->flags.nvme_enabled) {
5387 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
5388 ql_dbg(ql_dbg_disc, vha, 0x2049,
5389 "Register NVME FC Type Features failed.\n");
5390 }
5391 }
e315cd28 5392 if (qla2x00_rnn_id(vha)) {
1da177e4 5393 /* EMPTY */
83548fe2 5394 ql_dbg(ql_dbg_disc, vha, 0x2104,
7c3df132 5395 "Register Node Name failed.\n");
b98ae0d7
QT
5396 if (test_bit(LOOP_RESYNC_NEEDED,
5397 &vha->dpc_flags))
5398 break;
e315cd28 5399 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 5400 /* EMPTY */
83548fe2 5401 ql_dbg(ql_dbg_disc, vha, 0x209b,
0bf0efa1 5402 "Register Symbolic Node Name failed.\n");
b98ae0d7
QT
5403 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5404 break;
1da177e4
LT
5405 }
5406 }
5407
827210ba 5408
df673274
AP
5409 /* Mark the time right before querying FW for connected ports.
5410 * This process is long and asynchronous, and by the time it's done
5411 * the collected information might no longer be accurate. E.g. a
5412 * disconnected port might have re-connected and a brand new
5413 * session been created. In that case the session's generation
5414 * will be newer than discovery_gen. */
5415 qlt_do_generation_tick(vha, &discovery_gen);
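/*
 * Worked example (added illustration): if discovery_gen is sampled as 7
 * here and a port drops and logs back in while the scan below is still
 * running, the new session is created at generation 8; because 8 > 7,
 * later generation checks can tell that the session is newer than this
 * scan and, presumably, leave it alone.
 */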
5416
a4239945 5417 if (USE_ASYNC_SCAN(ha)) {
33b28357
QT
5418 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
5419 NULL);
a4239945
QT
5420 if (rval)
5421 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5422 } else {
f352eeb7
QT
5423 list_for_each_entry(fcport, &vha->vp_fcports, list)
5424 fcport->scan_state = QLA_FCPORT_SCAN;
5425
a4239945
QT
5426 rval = qla2x00_find_all_fabric_devs(vha);
5427 }
1da177e4
LT
5428 if (rval != QLA_SUCCESS)
5429 break;
1da177e4
LT
5430 } while (0);
5431
e84067d7
DG
5432 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
5433 qla_nvme_register_hba(vha);
5434
726b8548 5435 if (rval)
7c3df132
SK
5436 ql_dbg(ql_dbg_disc, vha, 0x2068,
5437 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
5438
5439 return (rval);
5440}
5441
1da177e4
LT
5442/*
5443 * qla2x00_find_all_fabric_devs
5444 *
5445 * Input:
5446 * ha = adapter block pointer.
5447 * dev = database device entry pointer.
5448 *
5449 * Returns:
5450 * 0 = success.
5451 *
5452 * Context:
5453 * Kernel context.
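 *
 * Overview (added annotation): queries the switch with GID_PT (plus
 * GPN_ID/GNN_ID/GFPN_ID/GFF_ID follow-ups), falling back to a GA_NXT
 * walk if those fail; matches each discovered port against
 * vha->vp_fcports by WWPN, and at the end marks fabric ports that were
 * not seen as lost / schedules them for deletion, except FCP2 devices.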
5454 */
5455static int
726b8548 5456qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1da177e4
LT
5457{
5458 int rval;
5459 uint16_t loop_id;
726b8548 5460 fc_port_t *fcport, *new_fcport;
1da177e4
LT
5461 int found;
5462
5463 sw_info_t *swl;
5464 int swl_idx;
5465 int first_dev, last_dev;
1516ef44 5466 port_id_t wrap = {}, nxt_d_id;
e315cd28 5467 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 5468 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
726b8548 5469 unsigned long flags;
1da177e4
LT
5470
5471 rval = QLA_SUCCESS;
5472
5473 /* Try GID_PT to get device list, else GAN. */
7a67735b 5474 if (!ha->swl)
642ef983 5475 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
5476 GFP_KERNEL);
5477 swl = ha->swl;
bbfbbbc1 5478 if (!swl) {
1da177e4 5479 /*EMPTY*/
83548fe2 5480 ql_dbg(ql_dbg_disc, vha, 0x209c,
7c3df132 5481 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 5482 } else {
642ef983 5483 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 5484 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 5485 swl = NULL;
b98ae0d7
QT
5486 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5487 return rval;
e315cd28 5488 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 5489 swl = NULL;
b98ae0d7
QT
5490 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5491 return rval;
e315cd28 5492 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 5493 swl = NULL;
b98ae0d7
QT
5494 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5495 return rval;
726b8548
QT
5496 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
5497 swl = NULL;
b98ae0d7
QT
5498 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5499 return rval;
1da177e4 5500 }
e8c72ba5
CD
5501
5502 /* If other queries succeeded probe for FC-4 type */
b98ae0d7 5503 if (swl) {
e8c72ba5 5504 qla2x00_gff_id(vha, swl);
b98ae0d7
QT
5505 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5506 return rval;
5507 }
1da177e4
LT
5508 }
5509 swl_idx = 0;
5510
5511 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 5512 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5513 if (new_fcport == NULL) {
83548fe2 5514 ql_log(ql_log_warn, vha, 0x209d,
7c3df132 5515 "Failed to allocate memory for fcport.\n");
1da177e4
LT
5516 return (QLA_MEMORY_ALLOC_FAILED);
5517 }
5518 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
5519 /* Set start port ID scan at adapter ID. */
5520 first_dev = 1;
5521 last_dev = 0;
5522
5523 /* Starting free loop ID. */
e315cd28
AC
5524 loop_id = ha->min_external_loopid;
5525 for (; loop_id <= ha->max_loop_id; loop_id++) {
5526 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
5527 continue;
5528
3a6478df
GM
5529 if (ha->current_topology == ISP_CFG_FL &&
5530 (atomic_read(&vha->loop_down_timer) ||
5531 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
5532 atomic_set(&vha->loop_down_timer, 0);
5533 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5534 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 5535 break;
bb2d52b2 5536 }
1da177e4
LT
5537
5538 if (swl != NULL) {
5539 if (last_dev) {
5540 wrap.b24 = new_fcport->d_id.b24;
5541 } else {
5542 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
5543 memcpy(new_fcport->node_name,
5544 swl[swl_idx].node_name, WWN_SIZE);
5545 memcpy(new_fcport->port_name,
5546 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
5547 memcpy(new_fcport->fabric_port_name,
5548 swl[swl_idx].fabric_port_name, WWN_SIZE);
5549 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 5550 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4 5551
a5d42f4c 5552 new_fcport->nvme_flag = 0;
1a28faa0 5553 new_fcport->fc4f_nvme = 0;
a5d42f4c
DG
5554 if (vha->flags.nvme_enabled &&
5555 swl[swl_idx].fc4f_nvme) {
5556 new_fcport->fc4f_nvme =
5557 swl[swl_idx].fc4f_nvme;
5558 ql_log(ql_log_info, vha, 0x2131,
5559 "FOUND: NVME port %8phC as FC Type 28h\n",
5560 new_fcport->port_name);
5561 }
5562
1da177e4
LT
5563 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
5564 last_dev = 1;
5565 }
5566 swl_idx++;
5567 }
5568 } else {
5569 /* Send GA_NXT to the switch */
e315cd28 5570 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 5571 if (rval != QLA_SUCCESS) {
83548fe2 5572 ql_log(ql_log_warn, vha, 0x209e,
7c3df132
SK
5573 "SNS scan failed -- assuming "
5574 "zero-entry result.\n");
1da177e4
LT
5575 rval = QLA_SUCCESS;
5576 break;
5577 }
5578 }
5579
5580 /* If wrap on switch device list, exit. */
5581 if (first_dev) {
5582 wrap.b24 = new_fcport->d_id.b24;
5583 first_dev = 0;
5584 } else if (new_fcport->d_id.b24 == wrap.b24) {
83548fe2 5585 ql_dbg(ql_dbg_disc, vha, 0x209f,
7c3df132
SK
5586 "Device wrap (%02x%02x%02x).\n",
5587 new_fcport->d_id.b.domain,
5588 new_fcport->d_id.b.area,
5589 new_fcport->d_id.b.al_pa);
1da177e4
LT
5590 break;
5591 }
5592
2c3dfe3f 5593 /* Bypass if same physical adapter. */
e315cd28 5594 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
5595 continue;
5596
2c3dfe3f 5597 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
5598 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
5599 continue;
2c3dfe3f 5600
f7d289f6
AV
5601 /* Bypass if same domain and area of adapter. */
5602 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 5603 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
5604 ISP_CFG_FL)
5605 continue;
5606
1da177e4
LT
5607 /* Bypass reserved domain fields. */
5608 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
5609 continue;
5610
e8c72ba5 5611 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
4da26e16
CD
5612 if (ql2xgffidenable &&
5613 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
5614 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
5615 continue;
5616
726b8548
QT
5617 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5618
1da177e4
LT
5619 /* Locate matching device in database. */
5620 found = 0;
e315cd28 5621 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
5622 if (memcmp(new_fcport->port_name, fcport->port_name,
5623 WWN_SIZE))
5624 continue;
5625
827210ba 5626 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 5627
1da177e4
LT
5628 found++;
5629
d8b45213
AV
5630 /* Update port state. */
5631 memcpy(fcport->fabric_port_name,
5632 new_fcport->fabric_port_name, WWN_SIZE);
5633 fcport->fp_speed = new_fcport->fp_speed;
5634
1da177e4 5635 /*
b2032fd5
RD
5636 * If the address is the same and the state is FCS_ONLINE
5637 * (or in target mode), nothing changed.
1da177e4
LT
5638 */
5639 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
b2032fd5 5640 (atomic_read(&fcport->state) == FCS_ONLINE ||
726b8548 5641 (vha->host->active_mode == MODE_TARGET))) {
1da177e4
LT
5642 break;
5643 }
5644
5645 /*
5646 * If device was not a fabric device before.
5647 */
5648 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
5649 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 5650 qla2x00_clear_loop_id(fcport);
1da177e4
LT
5651 fcport->flags |= (FCF_FABRIC_DEVICE |
5652 FCF_LOGIN_NEEDED);
1da177e4
LT
5653 break;
5654 }
5655
5656 /*
5657 * Port ID changed or device was marked to be updated;
5658 * Log it out if still logged in and mark it for
5659 * relogin later.
5660 */
726b8548 5661 if (qla_tgt_mode_enabled(base_vha)) {
b2032fd5
RD
5662 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
5663 "port changed FC ID, %8phC"
5664 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
5665 fcport->port_name,
5666 fcport->d_id.b.domain,
5667 fcport->d_id.b.area,
5668 fcport->d_id.b.al_pa,
5669 fcport->loop_id,
5670 new_fcport->d_id.b.domain,
5671 new_fcport->d_id.b.area,
5672 new_fcport->d_id.b.al_pa);
5673 fcport->d_id.b24 = new_fcport->d_id.b24;
5674 break;
5675 }
5676
1da177e4
LT
5677 fcport->d_id.b24 = new_fcport->d_id.b24;
5678 fcport->flags |= FCF_LOGIN_NEEDED;
1da177e4
LT
5679 break;
5680 }
5681
9dd9686b
DT
5682 if (fcport->fc4f_nvme) {
5683 if (fcport->disc_state == DSC_DELETE_PEND) {
5684 fcport->disc_state = DSC_GNL;
5685 vha->fcport_count--;
5686 fcport->login_succ = 0;
5687 }
5688 }
5689
726b8548
QT
5690 if (found) {
5691 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1da177e4 5692 continue;
726b8548 5693 }
1da177e4 5694 /* If device was not in our fcports list, then add it. */
b2032fd5 5695 new_fcport->scan_state = QLA_FCPORT_FOUND;
726b8548
QT
5696 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5697
5698 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5699
1da177e4
LT
5700
5701 /* Allocate a new replacement fcport. */
5702 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 5703 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5704 if (new_fcport == NULL) {
83548fe2 5705 ql_log(ql_log_warn, vha, 0xd032,
7c3df132 5706 "Memory allocation failed for fcport.\n");
1da177e4
LT
5707 return (QLA_MEMORY_ALLOC_FAILED);
5708 }
5709 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5710 new_fcport->d_id.b24 = nxt_d_id.b24;
5711 }
5712
726b8548
QT
5713 qla2x00_free_fcport(new_fcport);
5714
5715 /*
5716 * Log out all previous fabric devices marked lost, except FCP2 devices.
5717 */
5718 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5719 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5720 break;
5721
5722 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
5723 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
5724 continue;
5725
5726 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5727 if ((qla_dual_mode_enabled(vha) ||
5728 qla_ini_mode_enabled(vha)) &&
5729 atomic_read(&fcport->state) == FCS_ONLINE) {
5730 qla2x00_mark_device_lost(vha, fcport,
5731 ql2xplogiabsentdevice, 0);
5732 if (fcport->loop_id != FC_NO_LOOP_ID &&
5733 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5734 fcport->port_type != FCT_INITIATOR &&
5735 fcport->port_type != FCT_BROADCAST) {
83548fe2 5736 ql_dbg(ql_dbg_disc, vha, 0x20f0,
726b8548
QT
5737 "%s %d %8phC post del sess\n",
5738 __func__, __LINE__,
5739 fcport->port_name);
d8630bb9 5740 qlt_schedule_sess_for_deletion(fcport);
726b8548
QT
5741 continue;
5742 }
5743 }
5744 }
1da177e4 5745
726b8548
QT
5746 if (fcport->scan_state == QLA_FCPORT_FOUND)
5747 qla24xx_fcport_handle_login(vha, fcport);
5748 }
1da177e4
LT
5749 return (rval);
5750}
5751
5752/*
5753 * qla2x00_find_new_loop_id
5754 * Scan through our port list and find a new usable loop ID.
5755 *
5756 * Input:
5757 * ha: adapter state pointer.
5758 * dev: port structure pointer.
5759 *
5760 * Returns:
5761 * qla2x00 local function return status code.
5762 *
5763 * Context:
5764 * Kernel context.
5765 */
03bcfb57 5766int
e315cd28 5767qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
1da177e4
LT
5768{
5769 int rval;
e315cd28 5770 struct qla_hw_data *ha = vha->hw;
feafb7b1 5771 unsigned long flags = 0;
1da177e4
LT
5772
5773 rval = QLA_SUCCESS;
5774
5f16b331 5775 spin_lock_irqsave(&ha->vport_slock, flags);
1da177e4 5776
5f16b331
CD
5777 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
5778 LOOPID_MAP_SIZE);
5779 if (dev->loop_id >= LOOPID_MAP_SIZE ||
5780 qla2x00_is_reserved_id(vha, dev->loop_id)) {
5781 dev->loop_id = FC_NO_LOOP_ID;
5782 rval = QLA_FUNCTION_FAILED;
5783 } else
5784 set_bit(dev->loop_id, ha->loop_id_map);
1da177e4 5785
5f16b331 5786 spin_unlock_irqrestore(&ha->vport_slock, flags);
1da177e4 5787
5f16b331
CD
5788 if (rval == QLA_SUCCESS)
5789 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5790 "Assigning new loopid=%x, portid=%x.\n",
5791 dev->loop_id, dev->d_id.b24);
5792 else
5793 ql_log(ql_log_warn, dev->vha, 0x2087,
5794 "No loop_id's available, portid=%x.\n",
5795 dev->d_id.b24);
1da177e4
LT
5796
5797 return (rval);
5798}
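
/*
 * Illustrative sketch only (not part of the driver): loop IDs come from
 * the ha->loop_id_map bitmap under ha->vport_slock, so a caller pairs
 * qla2x00_find_new_loop_id() with qla2x00_clear_loop_id() to return the
 * ID.  The helper below is hypothetical.
 */
static int example_with_temp_loop_id(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval = qla2x00_find_new_loop_id(vha, fcport);

	if (rval != QLA_SUCCESS)
		return rval;		/* no free loop IDs */

	/* ... use fcport->loop_id, e.g. for a login attempt ... */

	qla2x00_clear_loop_id(fcport);	/* gives the ID back to the bitmap */
	return QLA_SUCCESS;
}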
5799
1da177e4 5800
f6602f3b
QT
5801/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
5802int
5803qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
5804{
5805 int loop_id = FC_NO_LOOP_ID;
5806 int lid = NPH_MGMT_SERVER - vha->vp_idx;
5807 unsigned long flags;
5808 struct qla_hw_data *ha = vha->hw;
5809
5810 if (vha->vp_idx == 0) {
5811 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
5812 return NPH_MGMT_SERVER;
5813 }
5814
5815 /* pick id from high and work down to low */
5816 spin_lock_irqsave(&ha->vport_slock, flags);
5817 for (; lid > 0; lid--) {
5818 if (!test_bit(lid, vha->hw->loop_id_map)) {
5819 set_bit(lid, vha->hw->loop_id_map);
5820 loop_id = lid;
5821 break;
5822 }
5823 }
5824 spin_unlock_irqrestore(&ha->vport_slock, flags);
5825
5826 return loop_id;
5827}
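
/*
 * Worked example (added illustration): the physical port (vp_idx 0)
 * simply claims NPH_MGMT_SERVER for itself; a vport with vp_idx 3
 * starts at NPH_MGMT_SERVER - 3 and walks downwards until it finds a
 * clear bit in loop_id_map, returning FC_NO_LOOP_ID if nothing is free.
 */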
5828
1da177e4
LT
5829/*
5830 * qla2x00_fabric_login
5831 * Issue fabric login command.
5832 *
5833 * Input:
5834 * ha = adapter block pointer.
5835 * device = pointer to FC device type structure.
5836 *
5837 * Returns:
5838 * 0 - Login successfully
5839 * 1 - Login failed
5840 * 2 - Initiator device
5841 * 3 - Fatal error
5842 */
5843int
e315cd28 5844qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
5845 uint16_t *next_loopid)
5846{
5847 int rval;
5848 int retry;
5849 uint16_t tmp_loopid;
5850 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 5851 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
5852
5853 retry = 0;
5854 tmp_loopid = 0;
5855
5856 for (;;) {
7c3df132
SK
5857 ql_dbg(ql_dbg_disc, vha, 0x2000,
5858 "Trying Fabric Login w/loop id 0x%04x for port "
5859 "%02x%02x%02x.\n",
5860 fcport->loop_id, fcport->d_id.b.domain,
5861 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
5862
5863 /* Login fcport on switch. */
0b91d116 5864 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
5865 fcport->d_id.b.domain, fcport->d_id.b.area,
5866 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
5867 if (rval != QLA_SUCCESS) {
5868 return rval;
5869 }
1da177e4
LT
5870 if (mb[0] == MBS_PORT_ID_USED) {
5871 /*
5872 * Device has another loop ID. The firmware team
0107109e
AV
5873 * recommends the driver perform an implicit login with
5874 * the specified ID again. The ID we just used is saved
5875 * here so we return with an ID that can be tried by
5876 * the next login.
1da177e4
LT
5877 */
5878 retry++;
5879 tmp_loopid = fcport->loop_id;
5880 fcport->loop_id = mb[1];
5881
7c3df132
SK
5882 ql_dbg(ql_dbg_disc, vha, 0x2001,
5883 "Fabric Login: port in use - next loop "
5884 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 5885 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 5886 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
5887
5888 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
5889 /*
5890 * Login succeeded.
5891 */
5892 if (retry) {
5893 /* A retry occurred before. */
5894 *next_loopid = tmp_loopid;
5895 } else {
5896 /*
5897 * No retry occurred before. Just increment the
5898 * ID value for next login.
5899 */
5900 *next_loopid = (fcport->loop_id + 1);
5901 }
5902
5903 if (mb[1] & BIT_0) {
5904 fcport->port_type = FCT_INITIATOR;
5905 } else {
5906 fcport->port_type = FCT_TARGET;
5907 if (mb[1] & BIT_1) {
8474f3a0 5908 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
5909 }
5910 }
5911
ad3e0eda
AV
5912 if (mb[10] & BIT_0)
5913 fcport->supported_classes |= FC_COS_CLASS2;
5914 if (mb[10] & BIT_1)
5915 fcport->supported_classes |= FC_COS_CLASS3;
5916
2d70c103
NB
5917 if (IS_FWI2_CAPABLE(ha)) {
5918 if (mb[10] & BIT_7)
5919 fcport->flags |=
5920 FCF_CONF_COMP_SUPPORTED;
5921 }
5922
1da177e4
LT
5923 rval = QLA_SUCCESS;
5924 break;
5925 } else if (mb[0] == MBS_LOOP_ID_USED) {
5926 /*
5927 * Loop ID already used, try next loop ID.
5928 */
5929 fcport->loop_id++;
e315cd28 5930 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
5931 if (rval != QLA_SUCCESS) {
5932 /* Ran out of loop IDs to use */
5933 break;
5934 }
5935 } else if (mb[0] == MBS_COMMAND_ERROR) {
5936 /*
5937 * Firmware possibly timed out during login. If no
5938 * retries are left, then the device is declared
5939 * dead.
5940 */
5941 *next_loopid = fcport->loop_id;
e315cd28 5942 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5943 fcport->d_id.b.domain, fcport->d_id.b.area,
5944 fcport->d_id.b.al_pa);
e315cd28 5945 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
5946
5947 rval = 1;
5948 break;
5949 } else {
5950 /*
5951 * unrecoverable / not handled error
5952 */
7c3df132
SK
5953 ql_dbg(ql_dbg_disc, vha, 0x2002,
5954 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
5955 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
5956 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5957 fcport->loop_id, jiffies);
1da177e4
LT
5958
5959 *next_loopid = fcport->loop_id;
e315cd28 5960 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5961 fcport->d_id.b.domain, fcport->d_id.b.area,
5962 fcport->d_id.b.al_pa);
5f16b331 5963 qla2x00_clear_loop_id(fcport);
0eedfcf0 5964 fcport->login_retry = 0;
1da177e4
LT
5965
5966 rval = 3;
5967 break;
5968 }
5969 }
5970
5971 return (rval);
5972}
5973
5974/*
5975 * qla2x00_local_device_login
5976 * Issue local device login command.
5977 *
5978 * Input:
5979 * ha = adapter block pointer.
5980 * loop_id = loop id of device to login to.
5981 *
5982 * Returns (Where's the #define!!!!):
5983 * 0 - Login successfully
5984 * 1 - Login failed
5985 * 3 - Fatal error
5986 */
5987int
e315cd28 5988qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
5989{
5990 int rval;
5991 uint16_t mb[MAILBOX_REGISTER_COUNT];
5992
5993 memset(mb, 0, sizeof(mb));
e315cd28 5994 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
5995 if (rval == QLA_SUCCESS) {
5996 /* Interrogate mailbox registers for any errors */
5997 if (mb[0] == MBS_COMMAND_ERROR)
5998 rval = 1;
5999 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6000 /* device not in PCB table */
6001 rval = 3;
6002 }
6003
6004 return (rval);
6005}
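
/*
 * Summary of the mapping above (added annotation): MBS_COMMAND_ERROR is
 * reported as 1 (login failed) and MBS_COMMAND_PARAMETER_ERROR as 3
 * (device not in the PCB table); any other successful mailbox completion
 * leaves the QLA_SUCCESS (0) result from qla2x00_login_local_device()
 * unchanged.
 */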
6006
6007/*
6008 * qla2x00_loop_resync
6009 * Resync with fibre channel devices.
6010 *
6011 * Input:
6012 * ha = adapter block pointer.
6013 *
6014 * Returns:
6015 * 0 = success
6016 */
6017int
e315cd28 6018qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 6019{
73208dfd 6020 int rval = QLA_SUCCESS;
1da177e4 6021 uint32_t wait_time;
67c2e93a
AC
6022 struct req_que *req;
6023 struct rsp_que *rsp;
6024
d7459527 6025 req = vha->req;
67c2e93a 6026 rsp = req->rsp;
1da177e4 6027
e315cd28
AC
6028 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6029 if (vha->flags.online) {
6030 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
6031 /* Wait at most MAX_TARGET RSCNs for a stable link. */
6032 wait_time = 256;
6033 do {
8ae6d9c7
GM
6034 if (!IS_QLAFX00(vha->hw)) {
6035 /*
6036 * Issue a marker after FW becomes
6037 * ready.
6038 */
6039 qla2x00_marker(vha, req, rsp, 0, 0,
6040 MK_SYNC_ALL);
6041 vha->marker_needed = 0;
6042 }
1da177e4
LT
6043
6044 /* Remap devices on Loop. */
e315cd28 6045 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 6046
8ae6d9c7
GM
6047 if (IS_QLAFX00(vha->hw))
6048 qlafx00_configure_devices(vha);
6049 else
6050 qla2x00_configure_loop(vha);
6051
1da177e4 6052 wait_time--;
e315cd28
AC
6053 } while (!atomic_read(&vha->loop_down_timer) &&
6054 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6055 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6056 &vha->dpc_flags)));
1da177e4 6057 }
1da177e4
LT
6058 }
6059
e315cd28 6060 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 6061 return (QLA_FUNCTION_FAILED);
1da177e4 6062
e315cd28 6063 if (rval)
7c3df132
SK
6064 ql_dbg(ql_dbg_disc, vha, 0x206c,
6065 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
6066
6067 return (rval);
6068}
6069
579d12b5
SK
6070/*
6071* qla2x00_perform_loop_resync
6072* Description: This function will set the appropriate flags and call
6073* qla2x00_loop_resync. If successful, the loop will be resynced
6074* Arguments : scsi_qla_host_t pointer
6075* return : Success or Failure
6076*/
6077
6078int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6079{
6080 int32_t rval = 0;
6081
6082 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6083 /* Configure the flags so that resync happens properly */
6084 atomic_set(&ha->loop_down_timer, 0);
6085 if (!(ha->device_flags & DFLG_NO_CABLE)) {
6086 atomic_set(&ha->loop_state, LOOP_UP);
6087 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6088 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6089 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6090
6091 rval = qla2x00_loop_resync(ha);
6092 } else
6093 atomic_set(&ha->loop_state, LOOP_DEAD);
6094
6095 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
6096 }
6097
6098 return rval;
6099}
6100
d97994dc 6101void
67becc00 6102qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc 6103{
6104 fc_port_t *fcport;
feafb7b1
AE
6105 struct scsi_qla_host *vha;
6106 struct qla_hw_data *ha = base_vha->hw;
6107 unsigned long flags;
d97994dc 6108
feafb7b1 6109 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 6110 /* Go with deferred removal of rport references. */
feafb7b1
AE
6111 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
6112 atomic_inc(&vha->vref_count);
6113 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 6114 if (fcport->drport &&
feafb7b1
AE
6115 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
6116 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 6117 qla2x00_rport_del(fcport);
df673274 6118
feafb7b1
AE
6119 spin_lock_irqsave(&ha->vport_slock, flags);
6120 }
6121 }
6122 atomic_dec(&vha->vref_count);
c4a9b538 6123 wake_up(&vha->vref_waitq);
feafb7b1
AE
6124 }
6125 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc 6126}
6127
7d613ac6
SV
6128/* Assumes idc_lock always held on entry */
6129void
6130qla83xx_reset_ownership(scsi_qla_host_t *vha)
6131{
6132 struct qla_hw_data *ha = vha->hw;
6133 uint32_t drv_presence, drv_presence_mask;
6134 uint32_t dev_part_info1, dev_part_info2, class_type;
6135 uint32_t class_type_mask = 0x3;
6136 uint16_t fcoe_other_function = 0xffff, i;
6137
7ec0effd
AD
6138 if (IS_QLA8044(ha)) {
6139 drv_presence = qla8044_rd_direct(vha,
6140 QLA8044_CRB_DRV_ACTIVE_INDEX);
6141 dev_part_info1 = qla8044_rd_direct(vha,
6142 QLA8044_CRB_DEV_PART_INFO_INDEX);
6143 dev_part_info2 = qla8044_rd_direct(vha,
6144 QLA8044_CRB_DEV_PART_INFO2);
6145 } else {
6146 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6147 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6148 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
6149 }
7d613ac6
SV
6150 for (i = 0; i < 8; i++) {
6151 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6152 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6153 (i != ha->portnum)) {
6154 fcoe_other_function = i;
6155 break;
6156 }
6157 }
6158 if (fcoe_other_function == 0xffff) {
6159 for (i = 0; i < 8; i++) {
6160 class_type = ((dev_part_info2 >> (i * 4)) &
6161 class_type_mask);
6162 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6163 ((i + 8) != ha->portnum)) {
6164 fcoe_other_function = i + 8;
6165 break;
6166 }
6167 }
6168 }
6169 /*
6170 * Prepare drv-presence mask based on fcoe functions present.
6171 * However consider only valid physical fcoe function numbers (0-15).
6172 */
6173 drv_presence_mask = ~((1 << (ha->portnum)) |
6174 ((fcoe_other_function == 0xffff) ?
6175 0 : (1 << (fcoe_other_function))));
6176
6177 /* We are the reset owner iff:
6178 * - No other protocol drivers are present.
6179 * - This is the lowest-numbered of the FCoE functions. */
6180 if (!(drv_presence & drv_presence_mask) &&
6181 (ha->portnum < fcoe_other_function)) {
6182 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6183 "This host is Reset owner.\n");
6184 ha->flags.nic_core_reset_owner = 1;
6185 }
6186}
6187
fa492630 6188static int
7d613ac6
SV
6189__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6190{
6191 int rval = QLA_SUCCESS;
6192 struct qla_hw_data *ha = vha->hw;
6193 uint32_t drv_ack;
6194
6195 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6196 if (rval == QLA_SUCCESS) {
6197 drv_ack |= (1 << ha->portnum);
6198 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6199 }
6200
6201 return rval;
6202}
6203
fa492630 6204static int
7d613ac6
SV
6205__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6206{
6207 int rval = QLA_SUCCESS;
6208 struct qla_hw_data *ha = vha->hw;
6209 uint32_t drv_ack;
6210
6211 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6212 if (rval == QLA_SUCCESS) {
6213 drv_ack &= ~(1 << ha->portnum);
6214 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6215 }
6216
6217 return rval;
6218}
6219
fa492630 6220static const char *
7d613ac6
SV
6221qla83xx_dev_state_to_string(uint32_t dev_state)
6222{
6223 switch (dev_state) {
6224 case QLA8XXX_DEV_COLD:
6225 return "COLD/RE-INIT";
6226 case QLA8XXX_DEV_INITIALIZING:
6227 return "INITIALIZING";
6228 case QLA8XXX_DEV_READY:
6229 return "READY";
6230 case QLA8XXX_DEV_NEED_RESET:
6231 return "NEED RESET";
6232 case QLA8XXX_DEV_NEED_QUIESCENT:
6233 return "NEED QUIESCENT";
6234 case QLA8XXX_DEV_FAILED:
6235 return "FAILED";
6236 case QLA8XXX_DEV_QUIESCENT:
6237 return "QUIESCENT";
6238 default:
6239 return "Unknown";
6240 }
6241}
6242
6243/* Assumes idc-lock always held on entry */
6244void
6245qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6246{
6247 struct qla_hw_data *ha = vha->hw;
6248 uint32_t idc_audit_reg = 0, duration_secs = 0;
6249
6250 switch (audit_type) {
6251 case IDC_AUDIT_TIMESTAMP:
6252 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6253 idc_audit_reg = (ha->portnum) |
6254 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6255 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6256 break;
6257
6258 case IDC_AUDIT_COMPLETION:
6259 duration_secs = ((jiffies_to_msecs(jiffies) -
6260 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6261 idc_audit_reg = (ha->portnum) |
6262 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6263 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6264 break;
6265
6266 default:
6267 ql_log(ql_log_warn, vha, 0xb078,
6268 "Invalid audit type specified.\n");
6269 break;
6270 }
6271}
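
/*
 * Illustrative decode of the QLA83XX_IDC_AUDIT layout written above
 * (sketch only; assumes the port number fits in bits 0-6 and the audit
 * type in bit 7, matching the shifts used in qla83xx_idc_audit()).
 */
static void example_decode_idc_audit(uint32_t reg)
{
	uint32_t portnum = reg & 0x7f;		/* function/port number */
	uint32_t type = (reg >> 7) & 0x1;	/* timestamp vs. completion */
	uint32_t secs = reg >> 8;		/* timestamp or duration, seconds */

	pr_info("IDC audit: fn %u type %u value %u\n", portnum, type, secs);
}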
6272
6273/* Assumes idc_lock always held on entry */
fa492630 6274static int
7d613ac6
SV
6275qla83xx_initiating_reset(scsi_qla_host_t *vha)
6276{
6277 struct qla_hw_data *ha = vha->hw;
6278 uint32_t idc_control, dev_state;
6279
6280 __qla83xx_get_idc_control(vha, &idc_control);
6281 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6282 ql_log(ql_log_info, vha, 0xb080,
6283 "NIC Core reset has been disabled. idc-control=0x%x\n",
6284 idc_control);
6285 return QLA_FUNCTION_FAILED;
6286 }
6287
6288 /* Set NEED-RESET iff in READY state and we are the reset-owner */
6289 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6290 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6291 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6292 QLA8XXX_DEV_NEED_RESET);
6293 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6294 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6295 } else {
6296 const char *state = qla83xx_dev_state_to_string(dev_state);
6297 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
6298
6299 /* SV: XXX: Is timeout required here? */
6300 /* Wait for IDC state change READY -> NEED_RESET */
6301 while (dev_state == QLA8XXX_DEV_READY) {
6302 qla83xx_idc_unlock(vha, 0);
6303 msleep(200);
6304 qla83xx_idc_lock(vha, 0);
6305 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6306 }
6307 }
6308
6309 /* Send IDC ack by writing to drv-ack register */
6310 __qla83xx_set_drv_ack(vha);
6311
6312 return QLA_SUCCESS;
6313}
6314
6315int
6316__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6317{
6318 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6319}
6320
7d613ac6
SV
6321int
6322__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6323{
6324 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6325}
6326
fa492630 6327static int
7d613ac6
SV
6328qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6329{
6330 uint32_t drv_presence = 0;
6331 struct qla_hw_data *ha = vha->hw;
6332
6333 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6334 if (drv_presence & (1 << ha->portnum))
6335 return QLA_SUCCESS;
6336 else
6337 return QLA_TEST_FAILED;
6338}
6339
6340int
6341qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6342{
6343 int rval = QLA_SUCCESS;
6344 struct qla_hw_data *ha = vha->hw;
6345
6346 ql_dbg(ql_dbg_p3p, vha, 0xb058,
6347 "Entered %s().\n", __func__);
6348
6349 if (vha->device_flags & DFLG_DEV_FAILED) {
6350 ql_log(ql_log_warn, vha, 0xb059,
6351 "Device in unrecoverable FAILED state.\n");
6352 return QLA_FUNCTION_FAILED;
6353 }
6354
6355 qla83xx_idc_lock(vha, 0);
6356
6357 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6358 ql_log(ql_log_warn, vha, 0xb05a,
6359 "Function=0x%x has been removed from IDC participation.\n",
6360 ha->portnum);
6361 rval = QLA_FUNCTION_FAILED;
6362 goto exit;
6363 }
6364
6365 qla83xx_reset_ownership(vha);
6366
6367 rval = qla83xx_initiating_reset(vha);
6368
6369 /*
6370 * Perform reset if we are the reset-owner,
6371 * else wait till IDC state changes to READY/FAILED.
6372 */
6373 if (rval == QLA_SUCCESS) {
6374 rval = qla83xx_idc_state_handler(vha);
6375
6376 if (rval == QLA_SUCCESS)
6377 ha->flags.nic_core_hung = 0;
6378 __qla83xx_clear_drv_ack(vha);
6379 }
6380
6381exit:
6382 qla83xx_idc_unlock(vha, 0);
6383
6384 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
6385
6386 return rval;
6387}
6388
81178772
SK
6389int
6390qla2xxx_mctp_dump(scsi_qla_host_t *vha)
6391{
6392 struct qla_hw_data *ha = vha->hw;
6393 int rval = QLA_FUNCTION_FAILED;
6394
6395 if (!IS_MCTP_CAPABLE(ha)) {
6396 /* This message can be removed from the final version */
6397 ql_log(ql_log_info, vha, 0x506d,
6398 "This board is not MCTP capable\n");
6399 return rval;
6400 }
6401
6402 if (!ha->mctp_dump) {
6403 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
6404 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
6405
6406 if (!ha->mctp_dump) {
6407 ql_log(ql_log_warn, vha, 0x506e,
6408 "Failed to allocate memory for mctp dump\n");
6409 return rval;
6410 }
6411 }
6412
6413#define MCTP_DUMP_STR_ADDR 0x00000000
6414 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
6415 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
6416 if (rval != QLA_SUCCESS) {
6417 ql_log(ql_log_warn, vha, 0x506f,
6418 "Failed to capture mctp dump\n");
6419 } else {
6420 ql_log(ql_log_info, vha, 0x5070,
6421 "Mctp dump capture for host (%ld/%p).\n",
6422 vha->host_no, ha->mctp_dump);
6423 ha->mctp_dumped = 1;
6424 }
6425
409ee0fe 6426 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
6427 ha->flags.nic_core_reset_hdlr_active = 1;
6428 rval = qla83xx_restart_nic_firmware(vha);
6429 if (rval)
6430 /* NIC Core reset failed. */
6431 ql_log(ql_log_warn, vha, 0x5071,
6432 "Failed to restart nic firmware\n");
6433 else
6434 ql_dbg(ql_dbg_p3p, vha, 0xb084,
6435 "Restarted NIC firmware successfully.\n");
6436 ha->flags.nic_core_reset_hdlr_active = 0;
6437 }
6438
6439 return rval;
6440
6441}
6442
579d12b5 6443/*
8fcd6b8b 6444* qla2x00_quiesce_io
579d12b5
SK
6445* Description: This function will block new I/Os.
6446* It does not abort any I/Os, as the context
6447* is not destroyed during quiescence.
6448* Arguments: scsi_qla_host_t
6449* return : void
6450*/
6451void
8fcd6b8b 6452qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
6453{
6454 struct qla_hw_data *ha = vha->hw;
6455 struct scsi_qla_host *vp;
6456
8fcd6b8b
CD
6457 ql_dbg(ql_dbg_dpc, vha, 0x401d,
6458 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
6459
6460 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
6461 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6462 atomic_set(&vha->loop_state, LOOP_DOWN);
6463 qla2x00_mark_all_devices_lost(vha, 0);
6464 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 6465 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
6466 } else {
6467 if (!atomic_read(&vha->loop_down_timer))
6468 atomic_set(&vha->loop_down_timer,
6469 LOOP_DOWN_TIME);
6470 }
6471 /* Wait for pending cmds to complete */
6472 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
6473}
6474
a9083016
GM
6475void
6476qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
6477{
6478 struct qla_hw_data *ha = vha->hw;
579d12b5 6479 struct scsi_qla_host *vp;
feafb7b1 6480 unsigned long flags;
6aef87be 6481 fc_port_t *fcport;
7c3f8fd1 6482 u16 i;
a9083016 6483
e46ef004
SK
6484 /* For ISP82XX, the driver waits for completion of the commands,
6485 * so the online flag should remain set.
6486 */
7ec0effd 6487 if (!(IS_P3P_TYPE(ha)))
e46ef004 6488 vha->flags.online = 0;
a9083016
GM
6489 ha->flags.chip_reset_done = 0;
6490 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 6491 vha->qla_stats.total_isp_aborts++;
a9083016 6492
7c3df132
SK
6493 ql_log(ql_log_info, vha, 0x00af,
6494 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 6495
b2000805 6496 ha->flags.purge_mbox = 1;
e46ef004
SK
6497 /* For ISP82XX, reset_chip only disables interrupts.
6498 * The driver waits for the commands to complete, so
6499 * the interrupts need to remain enabled.
6500 */
7ec0effd 6501 if (!(IS_P3P_TYPE(ha)))
a9083016
GM
6502 ha->isp_ops->reset_chip(vha);
6503
5d74c87a 6504 ha->link_data_rate = PORT_SPEED_UNKNOWN;
9cd883f0
QT
6505 SAVE_TOPO(ha);
6506 ha->flags.rida_fmt2 = 0;
ec7193e2
QT
6507 ha->flags.n2n_ae = 0;
6508 ha->flags.lip_ae = 0;
6509 ha->current_topology = 0;
6510 ha->flags.fw_started = 0;
6511 ha->flags.fw_init_done = 0;
b2000805
QT
6512 ha->chip_reset++;
6513 ha->base_qpair->chip_reset = ha->chip_reset;
7c3f8fd1
QT
6514 for (i = 0; i < ha->max_qpairs; i++) {
6515 if (ha->queue_pair_map[i])
6516 ha->queue_pair_map[i]->chip_reset =
6517 ha->base_qpair->chip_reset;
6518 }
726b8548 6519
b2000805
QT
6520 /* purge MBox commands */
6521 if (atomic_read(&ha->num_pend_mbx_stage3)) {
6522 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
6523 complete(&ha->mbx_intr_comp);
6524 }
6525
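/* Wait up to ~1 second (50 x 20 ms) for in-flight mailbox commands to drain. */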
6526 i = 0;
6527 while (atomic_read(&ha->num_pend_mbx_stage3) ||
6528 atomic_read(&ha->num_pend_mbx_stage2) ||
6529 atomic_read(&ha->num_pend_mbx_stage1)) {
6530 msleep(20);
6531 i++;
6532 if (i > 50)
6533 break;
6534 }
6535 ha->flags.purge_mbox = 0;
6536
a9083016
GM
6537 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
6538 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6539 atomic_set(&vha->loop_state, LOOP_DOWN);
6540 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
6541
6542 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 6543 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
6544 atomic_inc(&vp->vref_count);
6545 spin_unlock_irqrestore(&ha->vport_slock, flags);
6546
a9083016 6547 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
6548
6549 spin_lock_irqsave(&ha->vport_slock, flags);
6550 atomic_dec(&vp->vref_count);
6551 }
6552 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
6553 } else {
6554 if (!atomic_read(&vha->loop_down_timer))
6555 atomic_set(&vha->loop_down_timer,
6556 LOOP_DOWN_TIME);
6557 }
6558
6aef87be
AV
6559 /* Clear all async request states across all VPs. */
6560 list_for_each_entry(fcport, &vha->vp_fcports, list)
6561 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6562 spin_lock_irqsave(&ha->vport_slock, flags);
6563 list_for_each_entry(vp, &ha->vp_list, list) {
6564 atomic_inc(&vp->vref_count);
6565 spin_unlock_irqrestore(&ha->vport_slock, flags);
6566
6567 list_for_each_entry(fcport, &vp->vp_fcports, list)
6568 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6569
6570 spin_lock_irqsave(&ha->vport_slock, flags);
6571 atomic_dec(&vp->vref_count);
6572 }
6573 spin_unlock_irqrestore(&ha->vport_slock, flags);
6574
bddd2d65
LC
6575 if (!ha->flags.eeh_busy) {
6576 /* Make sure for ISP 82XX IO DMA is complete */
7ec0effd 6577 if (IS_P3P_TYPE(ha)) {
7190575f 6578 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
6579 ql_log(ql_log_info, vha, 0x00b4,
6580 "Done chip reset cleanup.\n");
a9083016 6581
e46ef004
SK
6582 /* Done waiting for pending commands.
6583 * Reset the online flag.
6584 */
6585 vha->flags.online = 0;
4d78c973 6586 }
a9083016 6587
bddd2d65
LC
6588 /* Requeue all commands in outstanding command list. */
6589 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6590 }
b6a029e1
AE
6591 /* memory barrier */
6592 wmb();
a9083016
GM
6593}
6594
1da177e4
LT
6595/*
6596* qla2x00_abort_isp
6597* Resets ISP and aborts all outstanding commands.
6598*
6599* Input:
6600* ha = adapter block pointer.
6601*
6602* Returns:
6603* 0 = success
6604*/
6605int
e315cd28 6606qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 6607{
476e8978 6608 int rval;
1da177e4 6609 uint8_t status = 0;
e315cd28
AC
6610 struct qla_hw_data *ha = vha->hw;
6611 struct scsi_qla_host *vp;
73208dfd 6612 struct req_que *req = ha->req_q_map[0];
feafb7b1 6613 unsigned long flags;
1da177e4 6614
e315cd28 6615 if (vha->flags.online) {
a9083016 6616 qla2x00_abort_isp_cleanup(vha);
1da177e4 6617
a6171297
SV
6618 if (IS_QLA8031(ha)) {
6619 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
6620 "Clearing fcoe driver presence.\n");
6621 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
6622 ql_dbg(ql_dbg_p3p, vha, 0xb073,
6623 "Error while clearing DRV-Presence.\n");
6624 }
6625
85880801
AV
6626 if (unlikely(pci_channel_offline(ha->pdev) &&
6627 ha->flags.pci_channel_io_perm_failure)) {
6628 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6629 status = 0;
6630 return status;
6631 }
6632
0645cb83
QT
6633 switch (vha->qlini_mode) {
6634 case QLA2XXX_INI_MODE_DISABLED:
6635 if (!qla_tgt_mode_enabled(vha))
6636 return 0;
6637 break;
6638 case QLA2XXX_INI_MODE_DUAL:
6639 if (!qla_dual_mode_enabled(vha))
6640 return 0;
6641 break;
6642 case QLA2XXX_INI_MODE_ENABLED:
6643 default:
6644 break;
6645 }
6646
73208dfd 6647 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 6648
e315cd28 6649 ha->isp_ops->nvram_config(vha);
1da177e4 6650
e315cd28
AC
6651 if (!qla2x00_restart_isp(vha)) {
6652 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 6653
e315cd28 6654 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
6655 /*
6656 * Issue marker command only when we are going
6657 * to start the I/O .
6658 */
e315cd28 6659 vha->marker_needed = 1;
1da177e4
LT
6660 }
6661
e315cd28 6662 vha->flags.online = 1;
1da177e4 6663
fd34f556 6664 ha->isp_ops->enable_intrs(ha);
1da177e4 6665
fa2a1ce5 6666 ha->isp_abort_cnt = 0;
e315cd28 6667 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 6668
6246b8a1
GM
6669 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
6670 qla2x00_get_fw_version(vha);
df613b96
AV
6671 if (ha->fce) {
6672 ha->flags.fce_enabled = 1;
6673 memset(ha->fce, 0,
6674 fce_calc_size(ha->fce_bufs));
e315cd28 6675 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
6676 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6677 &ha->fce_bufs);
6678 if (rval) {
7c3df132 6679 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
6680 "Unable to reinitialize FCE "
6681 "(%d).\n", rval);
6682 ha->flags.fce_enabled = 0;
6683 }
6684 }
436a7b11
AV
6685
6686 if (ha->eft) {
6687 memset(ha->eft, 0, EFT_SIZE);
e315cd28 6688 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
6689 ha->eft_dma, EFT_NUM_BUFFERS);
6690 if (rval) {
7c3df132 6691 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
6692 "Unable to reinitialize EFT "
6693 "(%d).\n", rval);
6694 }
6695 }
1da177e4 6696 } else { /* failed the ISP abort */
e315cd28
AC
6697 vha->flags.online = 1;
6698 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 6699 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
6700 ql_log(ql_log_fatal, vha, 0x8035,
6701 "ISP error recover failed - "
6702 "board disabled.\n");
fa2a1ce5 6703 /*
1da177e4
LT
6704 * The next call disables the board
6705 * completely.
6706 */
1e4ac5d6 6707 qla2x00_abort_isp_cleanup(vha);
e315cd28 6708 vha->flags.online = 0;
1da177e4 6709 clear_bit(ISP_ABORT_RETRY,
e315cd28 6710 &vha->dpc_flags);
1da177e4
LT
6711 status = 0;
6712 } else { /* schedule another ISP abort */
6713 ha->isp_abort_cnt--;
7c3df132
SK
6714 ql_dbg(ql_dbg_taskm, vha, 0x8020,
6715 "ISP abort - retry remaining %d.\n",
6716 ha->isp_abort_cnt);
1da177e4
LT
6717 status = 1;
6718 }
6719 } else {
6720 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
6721 ql_dbg(ql_dbg_taskm, vha, 0x8021,
6722 "ISP error recovery - retrying (%d) "
6723 "more times.\n", ha->isp_abort_cnt);
e315cd28 6724 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
6725 status = 1;
6726 }
6727 }
fa2a1ce5 6728
1da177e4
LT
6729 }
6730
e315cd28 6731 if (!status) {
7c3df132 6732 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
1608cc4a 6733 qla2x00_configure_hba(vha);
feafb7b1
AE
6734 spin_lock_irqsave(&ha->vport_slock, flags);
6735 list_for_each_entry(vp, &ha->vp_list, list) {
6736 if (vp->vp_idx) {
6737 atomic_inc(&vp->vref_count);
6738 spin_unlock_irqrestore(&ha->vport_slock, flags);
6739
e315cd28 6740 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
6741
6742 spin_lock_irqsave(&ha->vport_slock, flags);
6743 atomic_dec(&vp->vref_count);
6744 }
e315cd28 6745 }
feafb7b1
AE
6746 spin_unlock_irqrestore(&ha->vport_slock, flags);
6747
7d613ac6
SV
6748 if (IS_QLA8031(ha)) {
6749 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
6750 "Setting back fcoe driver presence.\n");
6751 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
6752 ql_dbg(ql_dbg_p3p, vha, 0xb074,
6753 "Error while setting DRV-Presence.\n");
6754 }
e315cd28 6755 } else {
d8424f68
JP
6756 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
6757 __func__);
1da177e4
LT
6758 }
6759
6760 return(status);
6761}
6762
6763/*
6764* qla2x00_restart_isp
6765* restarts the ISP after a reset
6766*
6767* Input:
6768* ha = adapter block pointer.
6769*
6770* Returns:
6771* 0 = success
6772*/
6773static int
e315cd28 6774qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 6775{
c6b2fca8 6776 int status = 0;
e315cd28 6777 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
6778 struct req_que *req = ha->req_q_map[0];
6779 struct rsp_que *rsp = ha->rsp_q_map[0];
1da177e4
LT
6780
6781 /* If firmware needs to be loaded */
e315cd28
AC
6782 if (qla2x00_isp_firmware(vha)) {
6783 vha->flags.online = 0;
6784 status = ha->isp_ops->chip_diag(vha);
6785 if (!status)
6786 status = qla2x00_setup_chip(vha);
1da177e4
LT
6787 }
6788
e315cd28
AC
6789 if (!status && !(status = qla2x00_init_rings(vha))) {
6790 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 6791 ha->flags.chip_reset_done = 1;
7108b76e 6792
73208dfd
AC
6793 /* Initialize the queues in use */
6794 qla25xx_init_queues(ha);
6795
e315cd28
AC
6796 status = qla2x00_fw_ready(vha);
6797 if (!status) {
0107109e 6798 /* Issue a marker after FW becomes ready. */
73208dfd 6799 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7108b76e 6800 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
6801 }
6802
6803 /* if no cable then assume it's good */
e315cd28 6804 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4 6805 status = 0;
1da177e4
LT
6806 }
6807 return (status);
6808}
6809
73208dfd
AC
6810static int
6811qla25xx_init_queues(struct qla_hw_data *ha)
6812{
6813 struct rsp_que *rsp = NULL;
6814 struct req_que *req = NULL;
6815 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6816 int ret = -1;
6817 int i;
6818
2afa19a9 6819 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd 6820 rsp = ha->rsp_q_map[i];
cb43285f 6821 if (rsp && test_bit(i, ha->rsp_qid_map)) {
73208dfd 6822 rsp->options &= ~BIT_0;
618a7523 6823 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 6824 if (ret != QLA_SUCCESS)
7c3df132
SK
6825 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6826 "%s Rsp que: %d init failed.\n",
6827 __func__, rsp->id);
73208dfd 6828 else
7c3df132
SK
6829 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6830 "%s Rsp que: %d inited.\n",
6831 __func__, rsp->id);
73208dfd 6832 }
2afa19a9
AC
6833 }
6834 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd 6835 req = ha->req_q_map[i];
cb43285f
QT
6836 if (req && test_bit(i, ha->req_qid_map)) {
6837 /* Clear outstanding commands array. */
73208dfd 6838 req->options &= ~BIT_0;
618a7523 6839 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 6840 if (ret != QLA_SUCCESS)
7c3df132
SK
6841 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6842 "%s Req que: %d init failed.\n",
6843 __func__, req->id);
73208dfd 6844 else
7c3df132
SK
6845 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6846 "%s Req que: %d inited.\n",
6847 __func__, req->id);
73208dfd
AC
6848 }
6849 }
6850 return ret;
6851}
6852
1da177e4
LT
6853/*
6854* qla2x00_reset_adapter
6855* Reset adapter.
6856*
6857* Input:
6858* ha = adapter block pointer.
6859*/
abbd8870 6860void
e315cd28 6861qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
6862{
6863 unsigned long flags = 0;
e315cd28 6864 struct qla_hw_data *ha = vha->hw;
3d71644c 6865 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 6866
e315cd28 6867 vha->flags.online = 0;
fd34f556 6868 ha->isp_ops->disable_intrs(ha);
1da177e4 6869
1da177e4
LT
6870 spin_lock_irqsave(&ha->hardware_lock, flags);
6871 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
6872 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
6873 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
6874 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
6875 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6876}
0107109e
AV
6877
6878void
e315cd28 6879qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
6880{
6881 unsigned long flags = 0;
e315cd28 6882 struct qla_hw_data *ha = vha->hw;
0107109e
AV
6883 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
6884
7ec0effd 6885 if (IS_P3P_TYPE(ha))
a9083016
GM
6886 return;
6887
e315cd28 6888 vha->flags.online = 0;
fd34f556 6889 ha->isp_ops->disable_intrs(ha);
0107109e
AV
6890
6891 spin_lock_irqsave(&ha->hardware_lock, flags);
6892 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
6893 RD_REG_DWORD(&reg->hccr);
6894 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
6895 RD_REG_DWORD(&reg->hccr);
6896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
6897
6898 if (IS_NOPOLLING_TYPE(ha))
6899 ha->isp_ops->enable_intrs(ha);
0107109e
AV
6900}
6901
4e08df3f
DM
6902/* On sparc systems, obtain port and node WWN from firmware
6903 * properties.
6904 */
e315cd28
AC
6905static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
6906 struct nvram_24xx *nv)
4e08df3f
DM
6907{
6908#ifdef CONFIG_SPARC
e315cd28 6909 struct qla_hw_data *ha = vha->hw;
4e08df3f 6910 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
6911 struct device_node *dp = pci_device_to_OF_node(pdev);
6912 const u8 *val;
4e08df3f
DM
6913 int len;
6914
6915 val = of_get_property(dp, "port-wwn", &len);
6916 if (val && len >= WWN_SIZE)
6917 memcpy(nv->port_name, val, WWN_SIZE);
6918
6919 val = of_get_property(dp, "node-wwn", &len);
6920 if (val && len >= WWN_SIZE)
6921 memcpy(nv->node_name, val, WWN_SIZE);
6922#endif
6923}
6924
0107109e 6925int
e315cd28 6926qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 6927{
4e08df3f 6928 int rval;
0107109e
AV
6929 struct init_cb_24xx *icb;
6930 struct nvram_24xx *nv;
6931 uint32_t *dptr;
6932 uint8_t *dptr1, *dptr2;
6933 uint32_t chksum;
6934 uint16_t cnt;
e315cd28 6935 struct qla_hw_data *ha = vha->hw;
0107109e 6936
4e08df3f 6937 rval = QLA_SUCCESS;
0107109e 6938 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 6939 nv = ha->nvram;
0107109e
AV
6940
6941 /* Determine NVRAM starting address. */
f73cb695 6942 if (ha->port_no == 0) {
e5b68a61
AC
6943 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
6944 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
6945 } else {
0107109e 6946 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790 6947 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
6948 }
f73cb695 6949
e5b68a61
AC
6950 ha->nvram_size = sizeof(struct nvram_24xx);
6951 ha->vpd_size = FA_NVRAM_VPD_SIZE;
0107109e 6952
281afe19
SJ
6953 /* Get VPD data into cache */
6954 ha->vpd = ha->nvram + VPD_OFFSET;
e315cd28 6955 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
281afe19
SJ
6956 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
6957
6958 /* Get NVRAM data into cache and calculate checksum. */
0107109e 6959 dptr = (uint32_t *)nv;
e315cd28 6960 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
0107109e 6961 ha->nvram_size);
da08ef5c
JC
6962 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6963 chksum += le32_to_cpu(*dptr);
0107109e 6964
7c3df132
SK
6965 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
6966 "Contents of NVRAM\n");
6967 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
6968 (uint8_t *)nv, ha->nvram_size);
0107109e
AV
6969
6970 /* Bad NVRAM data, set defaults parameters. */
6971 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6972 || nv->id[3] != ' ' ||
ad950360 6973 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
0107109e 6974 /* Reset NVRAM data. */
7c3df132 6975 ql_log(ql_log_warn, vha, 0x006b,
9e336520 6976 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132
SK
6977 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
6978 ql_log(ql_log_warn, vha, 0x006c,
6979 "Falling back to functioning (yet invalid -- WWPN) "
6980 "defaults.\n");
4e08df3f
DM
6981
6982 /*
6983 * Set default initialization control block.
6984 */
6985 memset(nv, 0, ha->nvram_size);
ad950360
BVA
6986 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6987 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 6988 nv->frame_payload_size = 2048;
ad950360
BVA
6989 nv->execution_throttle = cpu_to_le16(0xFFFF);
6990 nv->exchange_count = cpu_to_le16(0);
6991 nv->hard_address = cpu_to_le16(124);
4e08df3f 6992 nv->port_name[0] = 0x21;
f73cb695 6993 nv->port_name[1] = 0x00 + ha->port_no + 1;
4e08df3f
DM
6994 nv->port_name[2] = 0x00;
6995 nv->port_name[3] = 0xe0;
6996 nv->port_name[4] = 0x8b;
6997 nv->port_name[5] = 0x1c;
6998 nv->port_name[6] = 0x55;
6999 nv->port_name[7] = 0x86;
7000 nv->node_name[0] = 0x20;
7001 nv->node_name[1] = 0x00;
7002 nv->node_name[2] = 0x00;
7003 nv->node_name[3] = 0xe0;
7004 nv->node_name[4] = 0x8b;
7005 nv->node_name[5] = 0x1c;
7006 nv->node_name[6] = 0x55;
7007 nv->node_name[7] = 0x86;
e315cd28 7008 qla24xx_nvram_wwn_from_ofw(vha, nv);
ad950360
BVA
7009 nv->login_retry_count = cpu_to_le16(8);
7010 nv->interrupt_delay_timer = cpu_to_le16(0);
7011 nv->login_timeout = cpu_to_le16(0);
4e08df3f 7012 nv->firmware_options_1 =
ad950360
BVA
7013 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7014 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7015 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7016 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7017 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7018 nv->efi_parameters = cpu_to_le32(0);
4e08df3f 7019 nv->reset_delay = 5;
ad950360
BVA
7020 nv->max_luns_per_target = cpu_to_le16(128);
7021 nv->port_down_retry_count = cpu_to_le16(30);
7022 nv->link_down_timeout = cpu_to_le16(30);
4e08df3f
DM
7023
7024 rval = 1;
0107109e
AV
7025 }
7026
726b8548 7027 if (qla_tgt_mode_enabled(vha)) {
2d70c103 7028 /* Don't enable full login after initial LIP */
ad950360 7029 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
2d70c103 7030 /* Don't enable LIP full login for initiator */
ad950360 7031 nv->host_p &= cpu_to_le32(~BIT_10);
2d70c103
NB
7032 }
7033
7034 qlt_24xx_config_nvram_stage1(vha, nv);
7035
0107109e 7036 /* Reset Initialization control block */
e315cd28 7037 memset(icb, 0, ha->init_cb_size);
0107109e
AV
7038
7039 /* Copy 1st segment. */
7040 dptr1 = (uint8_t *)icb;
7041 dptr2 = (uint8_t *)&nv->version;
7042 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7043 while (cnt--)
7044 *dptr1++ = *dptr2++;
7045
7046 icb->login_retry_count = nv->login_retry_count;
3ea66e28 7047 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
7048
7049 /* Copy 2nd segment. */
7050 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7051 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7052 cnt = (uint8_t *)&icb->reserved_3 -
7053 (uint8_t *)&icb->interrupt_delay_timer;
7054 while (cnt--)
7055 *dptr1++ = *dptr2++;
0eaaca4c 7056 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
0107109e
AV
7057 /*
7058 * Setup driver NVRAM options.
7059 */
e315cd28 7060 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 7061 "QLA2462");
0107109e 7062
2d70c103
NB
7063 qlt_24xx_config_nvram_stage2(vha, icb);
7064
ad950360 7065 if (nv->host_p & cpu_to_le32(BIT_15)) {
2d70c103 7066 /* Use alternate WWN? */
5341e868
AV
7067 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7068 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7069 }
7070
0107109e 7071 /* Prepare nodename */
ad950360 7072 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
7073 /*
7074 * Firmware will apply the following mask if the nodename was
7075 * not provided.
7076 */
7077 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7078 icb->node_name[0] &= 0xF0;
7079 }
7080
7081 /* Set host adapter parameters. */
7082 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
7083 ha->flags.enable_lip_reset = 0;
7084 ha->flags.enable_lip_full_login =
7085 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7086 ha->flags.enable_target_reset =
7087 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
0107109e 7088 ha->flags.enable_led_scheme = 0;
d4c760c2 7089 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
0107109e 7090
fd0e7e4d
AV
7091 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7092 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
7093
7094 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7095 sizeof(ha->fw_seriallink_options24));
7096
7097 /* save HBA serial number */
7098 ha->serial0 = icb->port_name[5];
7099 ha->serial1 = icb->port_name[6];
7100 ha->serial2 = icb->port_name[7];
e315cd28
AC
7101 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7102 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 7103
ad950360 7104 icb->execution_throttle = cpu_to_le16(0xFFFF);
bc8fb3cb 7105
0107109e
AV
7106 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7107
7108 /* Set minimum login_timeout to 4 seconds. */
7109 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7110 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7111 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 7112 nv->login_timeout = cpu_to_le16(4);
0107109e 7113 ha->login_timeout = le16_to_cpu(nv->login_timeout);
0107109e 7114
00a537b8
AV
7115 /* Set minimum RATOV to 100 tenths of a second. */
7116 ha->r_a_tov = 100;
0107109e
AV
7117
7118 ha->loop_reset_delay = nv->reset_delay;
7119
7120 /* Link Down Timeout = 0:
7121 *
7122 * When Port Down timer expires we will start returning
7123 * I/O's to OS with "DID_NO_CONNECT".
7124 *
7125 * Link Down Timeout != 0:
7126 *
7127 * The driver waits for the link to come up after link down
7128 * before returning I/Os to OS with "DID_NO_CONNECT".
7129 */
7130 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7131 ha->loop_down_abort_time =
7132 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7133 } else {
7134 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7135 ha->loop_down_abort_time =
7136 (LOOP_DOWN_TIME - ha->link_down_timeout);
7137 }
7138
7139 /* Need enough time to try and get the port back. */
7140 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7141 if (qlport_down_retry)
7142 ha->port_down_retry_count = qlport_down_retry;
7143
7144 /* Set login_retry_count */
7145 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7146 if (ha->port_down_retry_count ==
7147 le16_to_cpu(nv->port_down_retry_count) &&
7148 ha->port_down_retry_count > 3)
7149 ha->login_retry_count = ha->port_down_retry_count;
7150 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7151 ha->login_retry_count = ha->port_down_retry_count;
7152 if (ql2xloginretrycount)
7153 ha->login_retry_count = ql2xloginretrycount;
7154
8777e431
QT
7155 /* N2N: driver will initiate Login instead of FW */
7156 icb->firmware_options_3 |= BIT_8;
7157
4fdfefe5 7158 /* Enable ZIO. */
e315cd28 7159 if (!vha->flags.init_done) {
4fdfefe5
AV
7160 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7161 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7162 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7163 le16_to_cpu(icb->interrupt_delay_timer): 2;
7164 }
ad950360 7165 icb->firmware_options_2 &= cpu_to_le32(
4fdfefe5 7166 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4fdfefe5 7167 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 7168 ha->zio_mode = QLA_ZIO_MODE_6;
7169
7c3df132 7170 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
7171 "ZIO mode %d enabled; timer delay (%d us).\n",
7172 ha->zio_mode, ha->zio_timer * 100);
7173
7174 icb->firmware_options_2 |= cpu_to_le32(
7175 (uint32_t)ha->zio_mode);
7176 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4fdfefe5
AV
7177 }
7178
4e08df3f 7179 if (rval) {
7c3df132
SK
7180 ql_log(ql_log_warn, vha, 0x0070,
7181 "NVRAM configuration failed.\n");
4e08df3f
DM
7182 }
7183 return (rval);
0107109e
AV
7184}
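
/*
 * Illustrative sketch (not part of the driver): the NVRAM validation above
 * treats the image as little-endian 32-bit words whose sum must be zero,
 * and additionally expects an "ISP " signature followed by a 16-bit version
 * word that is at least ICB_VERSION.  A standalone check of the same rule,
 * assuming a caller-supplied buffer laid out like struct nvram_24xx
 * (signature bytes first, version word immediately after):
 */
#include <stdint.h>
#include <stddef.h>

static int nvram_image_looks_valid(const uint8_t *buf, size_t len,
                                   uint16_t min_version)
{
    uint32_t chksum = 0;
    size_t i;

    if (len < 8 || (len & 3))
        return 0;

    /* Sum every little-endian 32-bit word; a good image sums to 0. */
    for (i = 0; i < len; i += 4)
        chksum += (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) |
                  ((uint32_t)buf[i + 2] << 16) |
                  ((uint32_t)buf[i + 3] << 24);

    if (chksum)
        return 0;

    /* "ISP " signature in the first four bytes. */
    if (buf[0] != 'I' || buf[1] != 'S' || buf[2] != 'P' || buf[3] != ' ')
        return 0;

    /* Little-endian 16-bit version word follows the signature. */
    return ((uint16_t)buf[4] | ((uint16_t)buf[5] << 8)) >= min_version;
}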
7185
4243c115
SC
7186uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
7187{
7188 struct qla27xx_image_status pri_image_status, sec_image_status;
7189 uint8_t valid_pri_image, valid_sec_image;
7190 uint32_t *wptr;
7191 uint32_t cnt, chksum, size;
7192 struct qla_hw_data *ha = vha->hw;
7193
7194 valid_pri_image = valid_sec_image = 1;
7195 ha->active_image = 0;
7196 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
7197
7198 if (!ha->flt_region_img_status_pri) {
7199 valid_pri_image = 0;
7200 goto check_sec_image;
7201 }
7202
7203 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
7204 ha->flt_region_img_status_pri, size);
7205
7206 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
7207 ql_dbg(ql_dbg_init, vha, 0x018b,
7208 "Primary image signature (0x%x) not valid\n",
7209 pri_image_status.signature);
7210 valid_pri_image = 0;
7211 goto check_sec_image;
7212 }
7213
7214 wptr = (uint32_t *)(&pri_image_status);
7215 cnt = size;
7216
da08ef5c
JC
7217 for (chksum = 0; cnt--; wptr++)
7218 chksum += le32_to_cpu(*wptr);
41dc529a 7219
4243c115
SC
7220 if (chksum) {
7221 ql_dbg(ql_dbg_init, vha, 0x018c,
7222 "Checksum validation failed for primary image (0x%x)\n",
7223 chksum);
7224 valid_pri_image = 0;
7225 }
7226
7227check_sec_image:
7228 if (!ha->flt_region_img_status_sec) {
7229 valid_sec_image = 0;
7230 goto check_valid_image;
7231 }
7232
7233 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
7234 ha->flt_region_img_status_sec, size);
7235
7236 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
7237 ql_dbg(ql_dbg_init, vha, 0x018d,
7238 "Secondary image signature(0x%x) not valid\n",
7239 sec_image_status.signature);
7240 valid_sec_image = 0;
7241 goto check_valid_image;
7242 }
7243
7244 wptr = (uint32_t *)(&sec_image_status);
7245 cnt = size;
da08ef5c
JC
7246 for (chksum = 0; cnt--; wptr++)
7247 chksum += le32_to_cpu(*wptr);
4243c115
SC
7248 if (chksum) {
7249 ql_dbg(ql_dbg_init, vha, 0x018e,
7250 "Checksum validation failed for secondary image (0x%x)\n",
7251 chksum);
7252 valid_sec_image = 0;
7253 }
7254
7255check_valid_image:
7256 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
7257 ha->active_image = QLA27XX_PRIMARY_IMAGE;
7258 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
7259 if (!ha->active_image ||
7260 pri_image_status.generation_number <
7261 sec_image_status.generation_number)
7262 ha->active_image = QLA27XX_SECONDARY_IMAGE;
7263 }
7264
22ebde16 7265 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
4243c115
SC
7266 ha->active_image == 0 ? "default bootld and fw" :
7267 ha->active_image == 1 ? "primary" :
7268 ha->active_image == 2 ? "secondary" :
7269 "Invalid");
7270
7271 return ha->active_image;
7272}
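
/*
 * Illustrative sketch (not part of the driver): the selection above accepts
 * an image only when it was read successfully and its status bit is set,
 * and it prefers the secondary image when the primary is unusable or has an
 * older generation number.  Reduced to the bare decision, with pri_ok/sec_ok
 * standing for "valid and status bit set":
 */
#include <stdint.h>

enum { IMG_DEFAULT = 0, IMG_PRIMARY = 1, IMG_SECONDARY = 2 };

static int pick_active_image(int pri_ok, uint32_t pri_gen,
                             int sec_ok, uint32_t sec_gen)
{
    int active = IMG_DEFAULT;

    if (pri_ok)
        active = IMG_PRIMARY;
    /* Secondary wins if the primary is unusable or older. */
    if (sec_ok && (active == IMG_DEFAULT || pri_gen < sec_gen))
        active = IMG_SECONDARY;

    return active;
}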
7273
413975a0 7274static int
cbc8eb67
AV
7275qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
7276 uint32_t faddr)
d1c61909 7277{
73208dfd 7278 int rval = QLA_SUCCESS;
d1c61909 7279 int segments, fragment;
d1c61909
AV
7280 uint32_t *dcode, dlen;
7281 uint32_t risc_addr;
7282 uint32_t risc_size;
7283 uint32_t i;
e315cd28 7284 struct qla_hw_data *ha = vha->hw;
73208dfd 7285 struct req_que *req = ha->req_q_map[0];
eaac30be 7286
7c3df132 7287 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 7288 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 7289
d1c61909
AV
7290 rval = QLA_SUCCESS;
7291
7292 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 7293 dcode = (uint32_t *)req->ring;
d1c61909
AV
7294 *srisc_addr = 0;
7295
4243c115
SC
7296 if (IS_QLA27XX(ha) &&
7297 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
7298 faddr = ha->flt_region_fw_sec;
7299
d1c61909 7300 /* Validate firmware image by checking version. */
e315cd28 7301 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
d1c61909
AV
7302 for (i = 0; i < 4; i++)
7303 dcode[i] = be32_to_cpu(dcode[i]);
7304 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7305 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7306 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7307 dcode[3] == 0)) {
7c3df132
SK
7308 ql_log(ql_log_fatal, vha, 0x008c,
7309 "Unable to verify the integrity of flash firmware "
7310 "image.\n");
7311 ql_log(ql_log_fatal, vha, 0x008d,
7312 "Firmware data: %08x %08x %08x %08x.\n",
7313 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
7314
7315 return QLA_FUNCTION_FAILED;
7316 }
7317
7318 while (segments && rval == QLA_SUCCESS) {
7319 /* Read segment's load information. */
e315cd28 7320 qla24xx_read_flash_data(vha, dcode, faddr, 4);
d1c61909
AV
7321
7322 risc_addr = be32_to_cpu(dcode[2]);
7323 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7324 risc_size = be32_to_cpu(dcode[3]);
7325
7326 fragment = 0;
7327 while (risc_size > 0 && rval == QLA_SUCCESS) {
7328 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7329 if (dlen > risc_size)
7330 dlen = risc_size;
7331
7c3df132
SK
7332 ql_dbg(ql_dbg_init, vha, 0x008e,
7333 "Loading risc segment@ risc addr %x "
7334 "number of dwords 0x%x offset 0x%x.\n",
7335 risc_addr, dlen, faddr);
d1c61909 7336
e315cd28 7337 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
7338 for (i = 0; i < dlen; i++)
7339 dcode[i] = swab32(dcode[i]);
7340
73208dfd 7341 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
d1c61909
AV
7342 dlen);
7343 if (rval) {
7c3df132
SK
7344 ql_log(ql_log_fatal, vha, 0x008f,
7345 "Failed to load segment %d of firmware.\n",
7346 fragment);
f261f7af 7347 return QLA_FUNCTION_FAILED;
d1c61909
AV
7348 }
7349
7350 faddr += dlen;
7351 risc_addr += dlen;
7352 risc_size -= dlen;
7353 fragment++;
7354 }
7355
7356 /* Next segment. */
7357 segments--;
7358 }
7359
f73cb695
CD
7360 if (!IS_QLA27XX(ha))
7361 return rval;
7362
7363 if (ha->fw_dump_template)
7364 vfree(ha->fw_dump_template);
7365 ha->fw_dump_template = NULL;
7366 ha->fw_dump_template_len = 0;
7367
7368 ql_dbg(ql_dbg_init, vha, 0x0161,
7369 "Loading fwdump template from %x\n", faddr);
7370 qla24xx_read_flash_data(vha, dcode, faddr, 7);
7371 risc_size = be32_to_cpu(dcode[2]);
7372 ql_dbg(ql_dbg_init, vha, 0x0162,
7373 "-> array size %x dwords\n", risc_size);
7374 if (risc_size == 0 || risc_size == ~0)
7375 goto default_template;
7376
7377 dlen = (risc_size - 8) * sizeof(*dcode);
7378 ql_dbg(ql_dbg_init, vha, 0x0163,
7379 "-> template allocating %x bytes...\n", dlen);
7380 ha->fw_dump_template = vmalloc(dlen);
7381 if (!ha->fw_dump_template) {
7382 ql_log(ql_log_warn, vha, 0x0164,
7383 "Failed fwdump template allocate %x bytes.\n", risc_size);
7384 goto default_template;
7385 }
7386
7387 faddr += 7;
7388 risc_size -= 8;
7389 dcode = ha->fw_dump_template;
7390 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
7391 for (i = 0; i < risc_size; i++)
7392 dcode[i] = le32_to_cpu(dcode[i]);
7393
7394 if (!qla27xx_fwdt_template_valid(dcode)) {
7395 ql_log(ql_log_warn, vha, 0x0165,
7396 "Failed fwdump template validate\n");
7397 goto default_template;
7398 }
7399
7400 dlen = qla27xx_fwdt_template_size(dcode);
7401 ql_dbg(ql_dbg_init, vha, 0x0166,
7402 "-> template size %x bytes\n", dlen);
7403 if (dlen > risc_size * sizeof(*dcode)) {
7404 ql_log(ql_log_warn, vha, 0x0167,
4fae52b5 7405 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 7406 (size_t)(dlen - risc_size * sizeof(*dcode)));
f73cb695
CD
7407 goto default_template;
7408 }
7409 ha->fw_dump_template_len = dlen;
7410 return rval;
7411
7412default_template:
7413 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
7414 if (ha->fw_dump_template)
7415 vfree(ha->fw_dump_template);
7416 ha->fw_dump_template = NULL;
7417 ha->fw_dump_template_len = 0;
7418
7419 dlen = qla27xx_fwdt_template_default_size();
7420 ql_dbg(ql_dbg_init, vha, 0x0169,
7421 "-> template allocating %x bytes...\n", dlen);
7422 ha->fw_dump_template = vmalloc(dlen);
7423 if (!ha->fw_dump_template) {
7424 ql_log(ql_log_warn, vha, 0x016a,
7425 "Failed fwdump template allocate %x bytes.\n", risc_size);
7426 goto failed_template;
7427 }
7428
7429 dcode = ha->fw_dump_template;
7430 risc_size = dlen / sizeof(*dcode);
7431 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
7432 for (i = 0; i < risc_size; i++)
7433 dcode[i] = be32_to_cpu(dcode[i]);
7434
7435 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7436 ql_log(ql_log_warn, vha, 0x016b,
7437 "Failed fwdump template validate\n");
7438 goto failed_template;
7439 }
7440
7441 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7442 ql_dbg(ql_dbg_init, vha, 0x016c,
7443 "-> template size %x bytes\n", dlen);
7444 ha->fw_dump_template_len = dlen;
7445 return rval;
7446
7447failed_template:
7448 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
7449 if (ha->fw_dump_template)
7450 vfree(ha->fw_dump_template);
7451 ha->fw_dump_template = NULL;
7452 ha->fw_dump_template_len = 0;
d1c61909
AV
7453 return rval;
7454}
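
/*
 * Illustrative sketch (not part of the driver): each RISC code segment is
 * pushed to the adapter in fragments no larger than the transfer buffer
 * (ha->fw_transfer_size above), aborting on the first failed fragment.
 * The skeleton of that inner loop, with a hypothetical load_chunk()
 * callback standing in for qla2x00_load_ram():
 */
#include <stdint.h>

typedef int (*load_chunk_fn)(uint32_t risc_addr, const uint32_t *words,
                             uint32_t nwords);

static int download_segment(uint32_t risc_addr, const uint32_t *words,
                            uint32_t risc_size, uint32_t max_words,
                            load_chunk_fn load_chunk)
{
    while (risc_size > 0) {
        uint32_t dlen = risc_size < max_words ? risc_size : max_words;
        int rval = load_chunk(risc_addr, words, dlen);

        if (rval)
            return rval;    /* abort on the first failed fragment */

        words     += dlen;
        risc_addr += dlen;
        risc_size -= dlen;
    }
    return 0;
}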
7455
e9454a88 7456#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 7457
0107109e 7458int
e315cd28 7459qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
7460{
7461 int rval;
7462 int i, fragment;
7463 uint16_t *wcode, *fwcode;
7464 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
7465 struct fw_blob *blob;
e315cd28 7466 struct qla_hw_data *ha = vha->hw;
73208dfd 7467 struct req_que *req = ha->req_q_map[0];
5433383e
AV
7468
7469 /* Load firmware blob. */
e315cd28 7470 blob = qla2x00_request_firmware(vha);
5433383e 7471 if (!blob) {
7c3df132 7472 ql_log(ql_log_info, vha, 0x0083,
94bcf830 7473 "Firmware image unavailable.\n");
7c3df132
SK
7474 ql_log(ql_log_info, vha, 0x0084,
7475 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
7476 return QLA_FUNCTION_FAILED;
7477 }
7478
7479 rval = QLA_SUCCESS;
7480
73208dfd 7481 wcode = (uint16_t *)req->ring;
5433383e
AV
7482 *srisc_addr = 0;
7483 fwcode = (uint16_t *)blob->fw->data;
7484 fwclen = 0;
7485
7486 /* Validate firmware image by checking version. */
7487 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132 7488 ql_log(ql_log_fatal, vha, 0x0085,
5b5e0928 7489 "Unable to verify integrity of firmware image (%zd).\n",
5433383e
AV
7490 blob->fw->size);
7491 goto fail_fw_integrity;
7492 }
7493 for (i = 0; i < 4; i++)
7494 wcode[i] = be16_to_cpu(fwcode[i + 4]);
7495 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
7496 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
7497 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
7498 ql_log(ql_log_fatal, vha, 0x0086,
7499 "Unable to verify integrity of firmware image.\n");
7500 ql_log(ql_log_fatal, vha, 0x0087,
7501 "Firmware data: %04x %04x %04x %04x.\n",
7502 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
7503 goto fail_fw_integrity;
7504 }
7505
7506 seg = blob->segs;
7507 while (*seg && rval == QLA_SUCCESS) {
7508 risc_addr = *seg;
7509 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
7510 risc_size = be16_to_cpu(fwcode[3]);
7511
7512 /* Validate firmware image size. */
7513 fwclen += risc_size * sizeof(uint16_t);
7514 if (blob->fw->size < fwclen) {
7c3df132 7515 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 7516 "Unable to verify integrity of firmware image "
5b5e0928 7517 "(%zd).\n", blob->fw->size);
5433383e
AV
7518 goto fail_fw_integrity;
7519 }
7520
7521 fragment = 0;
7522 while (risc_size > 0 && rval == QLA_SUCCESS) {
7523 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
7524 if (wlen > risc_size)
7525 wlen = risc_size;
7c3df132
SK
7526 ql_dbg(ql_dbg_init, vha, 0x0089,
7527 "Loading risc segment@ risc addr %x number of "
7528 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
7529
7530 for (i = 0; i < wlen; i++)
7531 wcode[i] = swab16(fwcode[i]);
7532
73208dfd 7533 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
7534 wlen);
7535 if (rval) {
7c3df132
SK
7536 ql_log(ql_log_fatal, vha, 0x008a,
7537 "Failed to load segment %d of firmware.\n",
7538 fragment);
5433383e
AV
7539 break;
7540 }
7541
7542 fwcode += wlen;
7543 risc_addr += wlen;
7544 risc_size -= wlen;
7545 fragment++;
7546 }
7547
7548 /* Next segment. */
7549 seg++;
7550 }
7551 return rval;
7552
7553fail_fw_integrity:
7554 return QLA_FUNCTION_FAILED;
7555}
7556
eaac30be
AV
7557static int
7558qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
7559{
7560 int rval;
7561 int segments, fragment;
7562 uint32_t *dcode, dlen;
7563 uint32_t risc_addr;
7564 uint32_t risc_size;
7565 uint32_t i;
5433383e 7566 struct fw_blob *blob;
f73cb695
CD
7567 const uint32_t *fwcode;
7568 uint32_t fwclen;
e315cd28 7569 struct qla_hw_data *ha = vha->hw;
73208dfd 7570 struct req_que *req = ha->req_q_map[0];
0107109e 7571
5433383e 7572 /* Load firmware blob. */
e315cd28 7573 blob = qla2x00_request_firmware(vha);
5433383e 7574 if (!blob) {
7c3df132 7575 ql_log(ql_log_warn, vha, 0x0090,
94bcf830 7576 "Firmware image unavailable.\n");
7c3df132
SK
7577 ql_log(ql_log_warn, vha, 0x0091,
7578 "Firmware images can be retrieved from: "
7579 QLA_FW_URL ".\n");
d1c61909 7580
eaac30be 7581 return QLA_FUNCTION_FAILED;
0107109e
AV
7582 }
7583
cfb0919c
CD
7584 ql_dbg(ql_dbg_init, vha, 0x0092,
7585 "FW: Loading via request-firmware.\n");
eaac30be 7586
0107109e
AV
7587 rval = QLA_SUCCESS;
7588
7589 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 7590 dcode = (uint32_t *)req->ring;
0107109e 7591 *srisc_addr = 0;
5433383e 7592 fwcode = (uint32_t *)blob->fw->data;
0107109e
AV
7593 fwclen = 0;
7594
7595 /* Validate firmware image by checking version. */
5433383e 7596 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7c3df132 7597 ql_log(ql_log_fatal, vha, 0x0093,
5b5e0928 7598 "Unable to verify integrity of firmware image (%zd).\n",
5433383e 7599 blob->fw->size);
f73cb695 7600 return QLA_FUNCTION_FAILED;
0107109e
AV
7601 }
7602 for (i = 0; i < 4; i++)
7603 dcode[i] = be32_to_cpu(fwcode[i + 4]);
7604 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7605 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7606 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7607 dcode[3] == 0)) {
7c3df132 7608 ql_log(ql_log_fatal, vha, 0x0094,
5b5e0928 7609 "Unable to verify integrity of firmware image (%zd).\n",
7c3df132
SK
7610 blob->fw->size);
7611 ql_log(ql_log_fatal, vha, 0x0095,
7612 "Firmware data: %08x %08x %08x %08x.\n",
7613 dcode[0], dcode[1], dcode[2], dcode[3]);
f73cb695 7614 return QLA_FUNCTION_FAILED;
0107109e
AV
7615 }
7616
7617 while (segments && rval == QLA_SUCCESS) {
7618 risc_addr = be32_to_cpu(fwcode[2]);
7619 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7620 risc_size = be32_to_cpu(fwcode[3]);
7621
7622 /* Validate firmware image size. */
7623 fwclen += risc_size * sizeof(uint32_t);
5433383e 7624 if (blob->fw->size < fwclen) {
7c3df132 7625 ql_log(ql_log_fatal, vha, 0x0096,
5433383e 7626 "Unable to verify integrity of firmware image "
5b5e0928 7627 "(%zd).\n", blob->fw->size);
f73cb695 7628 return QLA_FUNCTION_FAILED;
0107109e
AV
7629 }
7630
7631 fragment = 0;
7632 while (risc_size > 0 && rval == QLA_SUCCESS) {
7633 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7634 if (dlen > risc_size)
7635 dlen = risc_size;
7636
7c3df132
SK
7637 ql_dbg(ql_dbg_init, vha, 0x0097,
7638 "Loading risc segment@ risc addr %x "
7639 "number of dwords 0x%x.\n", risc_addr, dlen);
0107109e
AV
7640
7641 for (i = 0; i < dlen; i++)
7642 dcode[i] = swab32(fwcode[i]);
7643
73208dfd 7644 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
590f98e5 7645 dlen);
0107109e 7646 if (rval) {
7c3df132
SK
7647 ql_log(ql_log_fatal, vha, 0x0098,
7648 "Failed to load segment %d of firmware.\n",
7649 fragment);
f261f7af 7650 return QLA_FUNCTION_FAILED;
0107109e
AV
7651 }
7652
7653 fwcode += dlen;
7654 risc_addr += dlen;
7655 risc_size -= dlen;
7656 fragment++;
7657 }
7658
7659 /* Next segment. */
7660 segments--;
7661 }
f73cb695
CD
7662
7663 if (!IS_QLA27XX(ha))
7664 return rval;
7665
7666 if (ha->fw_dump_template)
7667 vfree(ha->fw_dump_template);
7668 ha->fw_dump_template = NULL;
7669 ha->fw_dump_template_len = 0;
7670
7671 ql_dbg(ql_dbg_init, vha, 0x171,
97ea702b
CD
7672 "Loading fwdump template from %x\n",
7673 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
f73cb695
CD
7674 risc_size = be32_to_cpu(fwcode[2]);
7675 ql_dbg(ql_dbg_init, vha, 0x172,
7676 "-> array size %x dwords\n", risc_size);
7677 if (risc_size == 0 || risc_size == ~0)
7678 goto default_template;
7679
7680 dlen = (risc_size - 8) * sizeof(*fwcode);
7681 ql_dbg(ql_dbg_init, vha, 0x0173,
7682 "-> template allocating %x bytes...\n", dlen);
7683 ha->fw_dump_template = vmalloc(dlen);
7684 if (!ha->fw_dump_template) {
7685 ql_log(ql_log_warn, vha, 0x0174,
7686 "Failed fwdump template allocate %x bytes.\n", risc_size);
7687 goto default_template;
7688 }
7689
7690 fwcode += 7;
7691 risc_size -= 8;
7692 dcode = ha->fw_dump_template;
7693 for (i = 0; i < risc_size; i++)
7694 dcode[i] = le32_to_cpu(fwcode[i]);
7695
7696 if (!qla27xx_fwdt_template_valid(dcode)) {
7697 ql_log(ql_log_warn, vha, 0x0175,
7698 "Failed fwdump template validate\n");
7699 goto default_template;
7700 }
7701
7702 dlen = qla27xx_fwdt_template_size(dcode);
7703 ql_dbg(ql_dbg_init, vha, 0x0176,
7704 "-> template size %x bytes\n", dlen);
7705 if (dlen > risc_size * sizeof(*fwcode)) {
7706 ql_log(ql_log_warn, vha, 0x0177,
4fae52b5 7707 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 7708 (size_t)(dlen - risc_size * sizeof(*fwcode)));
f73cb695
CD
7709 goto default_template;
7710 }
7711 ha->fw_dump_template_len = dlen;
0107109e
AV
7712 return rval;
7713
f73cb695
CD
7714default_template:
7715 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
7716 if (ha->fw_dump_template)
7717 vfree(ha->fw_dump_template);
7718 ha->fw_dump_template = NULL;
7719 ha->fw_dump_template_len = 0;
7720
7721 dlen = qla27xx_fwdt_template_default_size();
7722 ql_dbg(ql_dbg_init, vha, 0x0179,
7723 "-> template allocating %x bytes...\n", dlen);
7724 ha->fw_dump_template = vmalloc(dlen);
7725 if (!ha->fw_dump_template) {
7726 ql_log(ql_log_warn, vha, 0x017a,
7727 "Failed fwdump template allocate %x bytes.\n", risc_size);
7728 goto failed_template;
7729 }
7730
7731 dcode = ha->fw_dump_template;
7732 risc_size = dlen / sizeof(*fwcode);
7733 fwcode = qla27xx_fwdt_template_default();
7734 for (i = 0; i < risc_size; i++)
7735 dcode[i] = be32_to_cpu(fwcode[i]);
7736
7737 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7738 ql_log(ql_log_warn, vha, 0x017b,
7739 "Failed fwdump template validate\n");
7740 goto failed_template;
7741 }
7742
7743 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7744 ql_dbg(ql_dbg_init, vha, 0x017c,
7745 "-> template size %x bytes\n", dlen);
7746 ha->fw_dump_template_len = dlen;
7747 return rval;
7748
7749failed_template:
7750 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
7751 if (ha->fw_dump_template)
7752 vfree(ha->fw_dump_template);
7753 ha->fw_dump_template = NULL;
7754 ha->fw_dump_template_len = 0;
7755 return rval;
0107109e 7756}
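
/*
 * Illustrative sketch (not part of the driver): both the flash and the blob
 * fwdump-template paths above apply the same sanity rule, namely reject an
 * all-zero or all-ones array length (unprogrammed/erased region) and reject
 * a template whose self-reported size exceeds the dwords that were actually
 * read.  Expressed on its own, with sizes in bytes:
 */
#include <stdint.h>
#include <stddef.h>

static int template_len_is_sane(uint32_t array_dwords, size_t reported_bytes)
{
    if (array_dwords == 0 || array_dwords == ~0U)
        return 0;    /* unprogrammed or erased region */

    return reported_bytes <= (size_t)array_dwords * sizeof(uint32_t);
}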
18c6c127 7757
eaac30be
AV
7758int
7759qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7760{
7761 int rval;
7762
e337d907
AV
7763 if (ql2xfwloadbin == 1)
7764 return qla81xx_load_risc(vha, srisc_addr);
7765
eaac30be
AV
7766 /*
7767 * FW Load priority:
7768 * 1) Firmware via request-firmware interface (.bin file).
7769 * 2) Firmware residing in flash.
7770 */
7771 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7772 if (rval == QLA_SUCCESS)
7773 return rval;
7774
cbc8eb67
AV
7775 return qla24xx_load_risc_flash(vha, srisc_addr,
7776 vha->hw->flt_region_fw);
eaac30be
AV
7777}
7778
7779int
7780qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7781{
7782 int rval;
cbc8eb67 7783 struct qla_hw_data *ha = vha->hw;
eaac30be 7784
e337d907 7785 if (ql2xfwloadbin == 2)
cbc8eb67 7786 goto try_blob_fw;
e337d907 7787
eaac30be
AV
7788 /*
7789 * FW Load priority:
7790 * 1) Firmware residing in flash.
7791 * 2) Firmware via request-firmware interface (.bin file).
cbc8eb67 7792 * 3) Golden-Firmware residing in flash -- limited operation.
eaac30be 7793 */
cbc8eb67 7794 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
eaac30be
AV
7795 if (rval == QLA_SUCCESS)
7796 return rval;
7797
cbc8eb67
AV
7798try_blob_fw:
7799 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7800 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
7801 return rval;
7802
7c3df132
SK
7803 ql_log(ql_log_info, vha, 0x0099,
7804 "Attempting to fallback to golden firmware.\n");
cbc8eb67
AV
7805 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
7806 if (rval != QLA_SUCCESS)
7807 return rval;
7808
7c3df132 7809 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
cbc8eb67 7810 ha->flags.running_gold_fw = 1;
cbc8eb67 7811 return rval;
eaac30be
AV
7812}
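
/*
 * Illustrative sketch (not part of the driver): for ISP81xx the load order
 * above is flash image, then the request_firmware() blob, then the golden
 * (limited-operation) flash image, unless ql2xfwloadbin asks for the blob
 * to be tried first.  As a plain fallback chain with hypothetical callbacks
 * that return 0 on success; a missing golden callback stands in for an
 * absent flt_region_gold_fw:
 */
static int load_fw_with_fallback(int (*from_flash)(void),
                                 int (*from_blob)(void),
                                 int (*from_golden_flash)(void),
                                 int prefer_blob)
{
    int rval;

    if (!prefer_blob) {
        rval = from_flash();
        if (rval == 0)
            return 0;
    }

    rval = from_blob();
    if (rval == 0 || !from_golden_flash)
        return rval;

    /* Last resort: golden firmware, limited operation only. */
    return from_golden_flash();
}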
7813
18c6c127 7814void
e315cd28 7815qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
7816{
7817 int ret, retries;
e315cd28 7818 struct qla_hw_data *ha = vha->hw;
18c6c127 7819
85880801
AV
7820 if (ha->flags.pci_channel_io_perm_failure)
7821 return;
e428924c 7822 if (!IS_FWI2_CAPABLE(ha))
18c6c127 7823 return;
75edf81d
AV
7824 if (!ha->fw_major_version)
7825 return;
ec7193e2
QT
7826 if (!ha->flags.fw_started)
7827 return;
18c6c127 7828
e315cd28 7829 ret = qla2x00_stop_firmware(vha);
7c7f1f29 7830 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 7831 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
7832 ha->isp_ops->reset_chip(vha);
7833 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 7834 continue;
e315cd28 7835 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 7836 continue;
7c3df132
SK
7837 ql_log(ql_log_info, vha, 0x8015,
7838 "Attempting retry of stop-firmware command.\n");
e315cd28 7839 ret = qla2x00_stop_firmware(vha);
18c6c127 7840 }
ec7193e2 7841
4b60c827 7842 QLA_FW_STOPPED(ha);
ec7193e2 7843 ha->flags.fw_init_done = 0;
18c6c127 7844}
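
/*
 * Illustrative sketch (not part of the driver): the stop-firmware retry
 * above re-initializes the chip between attempts and gives up early on a
 * timeout or an "invalid command" status.  The control flow, with
 * hypothetical helpers (reinit_chip() covering reset + diag + setup):
 */
enum fw_stop_status { FW_STOP_OK, FW_STOP_TIMEOUT, FW_STOP_INVALID, FW_STOP_ERR };

static void stop_firmware_with_retries(enum fw_stop_status (*stop_fw)(void),
                                       int (*reinit_chip)(void))
{
    enum fw_stop_status ret = stop_fw();
    int retries;

    for (retries = 5;
         ret != FW_STOP_OK && ret != FW_STOP_TIMEOUT &&
         ret != FW_STOP_INVALID && retries; retries--) {
        if (reinit_chip())
            continue;    /* chip not ready; try re-initializing again */
        ret = stop_fw();
    }
}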
2c3dfe3f
SJ
7845
7846int
e315cd28 7847qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
7848{
7849 int rval = QLA_SUCCESS;
0b91d116 7850 int rval2;
2c3dfe3f 7851 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
7852 struct qla_hw_data *ha = vha->hw;
7853 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
67c2e93a
AC
7854 struct req_que *req;
7855 struct rsp_que *rsp;
2c3dfe3f 7856
e315cd28 7857 if (!vha->vp_idx)
2c3dfe3f
SJ
7858 return -EINVAL;
7859
e315cd28 7860 rval = qla2x00_fw_ready(base_vha);
d7459527
MH
7861 if (vha->qpair)
7862 req = vha->qpair->req;
67c2e93a 7863 else
d7459527 7864 req = ha->req_q_map[0];
67c2e93a
AC
7865 rsp = req->rsp;
7866
2c3dfe3f 7867 if (rval == QLA_SUCCESS) {
e315cd28 7868 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
73208dfd 7869 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
7870 }
7871
e315cd28 7872 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
7873
7874 /* Login to SNS first */
0b91d116
CD
7875 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
7876 BIT_1);
7877 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
7878 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
7879 ql_dbg(ql_dbg_init, vha, 0x0120,
7880 "Failed SNS login: loop_id=%x, rval2=%d\n",
7881 NPH_SNS, rval2);
7882 else
7883 ql_dbg(ql_dbg_init, vha, 0x0103,
7884 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
7885 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
7886 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
7887 return (QLA_FUNCTION_FAILED);
7888 }
7889
e315cd28
AC
7890 atomic_set(&vha->loop_down_timer, 0);
7891 atomic_set(&vha->loop_state, LOOP_UP);
7892 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7893 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
7894 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
7895
7896 return rval;
7897}
4d4df193
HK
7898
7899/* 84XX Support **************************************************************/
7900
7901static LIST_HEAD(qla_cs84xx_list);
7902static DEFINE_MUTEX(qla_cs84xx_mutex);
7903
7904static struct qla_chip_state_84xx *
e315cd28 7905qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
7906{
7907 struct qla_chip_state_84xx *cs84xx;
e315cd28 7908 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7909
7910 mutex_lock(&qla_cs84xx_mutex);
7911
7912 /* Find any shared 84xx chip. */
7913 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
7914 if (cs84xx->bus == ha->pdev->bus) {
7915 kref_get(&cs84xx->kref);
7916 goto done;
7917 }
7918 }
7919
7920 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
7921 if (!cs84xx)
7922 goto done;
7923
7924 kref_init(&cs84xx->kref);
7925 spin_lock_init(&cs84xx->access_lock);
7926 mutex_init(&cs84xx->fw_update_mutex);
7927 cs84xx->bus = ha->pdev->bus;
7928
7929 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
7930done:
7931 mutex_unlock(&qla_cs84xx_mutex);
7932 return cs84xx;
7933}
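
/*
 * Illustrative sketch (not part of the driver): qla84xx_get_chip() shares
 * one state object per PCI bus by searching a global list under a mutex and
 * taking a reference, allocating only on first use.  The same get-or-create
 * pattern with a plain counted node; the caller is assumed to hold the list
 * lock, as qla_cs84xx_mutex does above:
 */
#include <stdlib.h>

struct shared_state {
    struct shared_state *next;
    const void *key;        /* stands in for ha->pdev->bus */
    unsigned int refcount;
};

static struct shared_state *get_or_create(struct shared_state **head,
                                          const void *key)
{
    struct shared_state *s;

    for (s = *head; s; s = s->next)
        if (s->key == key) {
            s->refcount++;    /* existing object: take a reference */
            return s;
        }

    s = calloc(1, sizeof(*s));
    if (!s)
        return NULL;
    s->key = key;
    s->refcount = 1;
    s->next = *head;
    *head = s;
    return s;
}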
7934
7935static void
7936__qla84xx_chip_release(struct kref *kref)
7937{
7938 struct qla_chip_state_84xx *cs84xx =
7939 container_of(kref, struct qla_chip_state_84xx, kref);
7940
7941 mutex_lock(&qla_cs84xx_mutex);
7942 list_del(&cs84xx->list);
7943 mutex_unlock(&qla_cs84xx_mutex);
7944 kfree(cs84xx);
7945}
7946
7947void
e315cd28 7948qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 7949{
e315cd28 7950 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7951 if (ha->cs84xx)
7952 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
7953}
7954
7955static int
e315cd28 7956qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
7957{
7958 int rval;
7959 uint16_t status[2];
e315cd28 7960 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7961
7962 mutex_lock(&ha->cs84xx->fw_update_mutex);
7963
e315cd28 7964 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
7965
7966 mutex_unlock(&ha->cs84xx->fw_update_mutex);
7967
7968 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7969 QLA_SUCCESS;
7970}
3a03eb79
AV
7971
7972/* 81XX Support **************************************************************/
7973
7974int
7975qla81xx_nvram_config(scsi_qla_host_t *vha)
7976{
7977 int rval;
7978 struct init_cb_81xx *icb;
7979 struct nvram_81xx *nv;
7980 uint32_t *dptr;
7981 uint8_t *dptr1, *dptr2;
7982 uint32_t chksum;
7983 uint16_t cnt;
7984 struct qla_hw_data *ha = vha->hw;
7985
7986 rval = QLA_SUCCESS;
7987 icb = (struct init_cb_81xx *)ha->init_cb;
7988 nv = ha->nvram;
7989
7990 /* Determine NVRAM starting address. */
7991 ha->nvram_size = sizeof(struct nvram_81xx);
3a03eb79 7992 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7ec0effd
AD
7993 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7994 ha->vpd_size = FA_VPD_SIZE_82XX;
3a03eb79
AV
7995
7996 /* Get VPD data into cache */
7997 ha->vpd = ha->nvram + VPD_OFFSET;
3d79038f
AV
7998 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7999 ha->vpd_size);
3a03eb79
AV
8000
8001 /* Get NVRAM data into cache and calculate checksum. */
3d79038f 8002 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
3a03eb79 8003 ha->nvram_size);
3d79038f 8004 dptr = (uint32_t *)nv;
da08ef5c
JC
8005 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
8006 chksum += le32_to_cpu(*dptr);
3a03eb79 8007
7c3df132
SK
8008 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
8009 "Contents of NVRAM:\n");
8010 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
8011 (uint8_t *)nv, ha->nvram_size);
3a03eb79
AV
8012
8013 /* Bad NVRAM data, set default parameters. */
8014 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
8015 || nv->id[3] != ' ' ||
ad950360 8016 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
3a03eb79 8017 /* Reset NVRAM data. */
7c3df132 8018 ql_log(ql_log_info, vha, 0x0073,
9e336520 8019 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132 8020 "version=0x%x.\n", chksum, nv->id[0],
3a03eb79 8021 le16_to_cpu(nv->nvram_version));
7c3df132
SK
8022 ql_log(ql_log_info, vha, 0x0074,
8023 "Falling back to functioning (yet invalid -- WWPN) "
8024 "defaults.\n");
3a03eb79
AV
8025
8026 /*
8027 * Set default initialization control block.
8028 */
8029 memset(nv, 0, ha->nvram_size);
ad950360
BVA
8030 nv->nvram_version = cpu_to_le16(ICB_VERSION);
8031 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 8032 nv->frame_payload_size = 2048;
ad950360
BVA
8033 nv->execution_throttle = cpu_to_le16(0xFFFF);
8034 nv->exchange_count = cpu_to_le16(0);
3a03eb79 8035 nv->port_name[0] = 0x21;
f73cb695 8036 nv->port_name[1] = 0x00 + ha->port_no + 1;
3a03eb79
AV
8037 nv->port_name[2] = 0x00;
8038 nv->port_name[3] = 0xe0;
8039 nv->port_name[4] = 0x8b;
8040 nv->port_name[5] = 0x1c;
8041 nv->port_name[6] = 0x55;
8042 nv->port_name[7] = 0x86;
8043 nv->node_name[0] = 0x20;
8044 nv->node_name[1] = 0x00;
8045 nv->node_name[2] = 0x00;
8046 nv->node_name[3] = 0xe0;
8047 nv->node_name[4] = 0x8b;
8048 nv->node_name[5] = 0x1c;
8049 nv->node_name[6] = 0x55;
8050 nv->node_name[7] = 0x86;
ad950360
BVA
8051 nv->login_retry_count = cpu_to_le16(8);
8052 nv->interrupt_delay_timer = cpu_to_le16(0);
8053 nv->login_timeout = cpu_to_le16(0);
3a03eb79 8054 nv->firmware_options_1 =
ad950360
BVA
8055 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
8056 nv->firmware_options_2 = cpu_to_le32(2 << 4);
8057 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
8058 nv->firmware_options_3 = cpu_to_le32(2 << 13);
8059 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
8060 nv->efi_parameters = cpu_to_le32(0);
3a03eb79 8061 nv->reset_delay = 5;
ad950360
BVA
8062 nv->max_luns_per_target = cpu_to_le16(128);
8063 nv->port_down_retry_count = cpu_to_le16(30);
8064 nv->link_down_timeout = cpu_to_le16(180);
eeebcc92 8065 nv->enode_mac[0] = 0x00;
6246b8a1
GM
8066 nv->enode_mac[1] = 0xC0;
8067 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
8068 nv->enode_mac[3] = 0x04;
8069 nv->enode_mac[4] = 0x05;
f73cb695 8070 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
8071
8072 rval = 1;
8073 }
8074
9e522cd8
AE
8075 if (IS_T10_PI_CAPABLE(ha))
8076 nv->frame_payload_size &= ~7;
8077
aa230bc5
AE
8078 qlt_81xx_config_nvram_stage1(vha, nv);
8079
3a03eb79 8080 /* Reset Initialization control block */
773120e4 8081 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
8082
8083 /* Copy 1st segment. */
8084 dptr1 = (uint8_t *)icb;
8085 dptr2 = (uint8_t *)&nv->version;
8086 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
8087 while (cnt--)
8088 *dptr1++ = *dptr2++;
8089
8090 icb->login_retry_count = nv->login_retry_count;
8091
8092 /* Copy 2nd segment. */
8093 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
8094 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
8095 cnt = (uint8_t *)&icb->reserved_5 -
8096 (uint8_t *)&icb->interrupt_delay_timer;
8097 while (cnt--)
8098 *dptr1++ = *dptr2++;
8099
8100 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
8101 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
8102 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
8103 icb->enode_mac[0] = 0x00;
8104 icb->enode_mac[1] = 0xC0;
8105 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
8106 icb->enode_mac[3] = 0x04;
8107 icb->enode_mac[4] = 0x05;
f73cb695 8108 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
8109 }
8110
b64b0e8f
AV
8111 /* Use extended-initialization control block. */
8112 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
0eaaca4c 8113 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
3a03eb79
AV
8114 /*
8115 * Setup driver NVRAM options.
8116 */
8117 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 8118 "QLE8XXX");
3a03eb79 8119
aa230bc5
AE
8120 qlt_81xx_config_nvram_stage2(vha, icb);
8121
3a03eb79 8122 /* Use alternate WWN? */
ad950360 8123 if (nv->host_p & cpu_to_le32(BIT_15)) {
3a03eb79
AV
8124 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
8125 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
8126 }
8127
8128 /* Prepare nodename */
ad950360 8129 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
3a03eb79
AV
8130 /*
8131 * Firmware will apply the following mask if the nodename was
8132 * not provided.
8133 */
8134 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
8135 icb->node_name[0] &= 0xF0;
8136 }
8137
8138 /* Set host adapter parameters. */
8139 ha->flags.disable_risc_code_load = 0;
8140 ha->flags.enable_lip_reset = 0;
8141 ha->flags.enable_lip_full_login =
8142 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
8143 ha->flags.enable_target_reset =
8144 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
8145 ha->flags.enable_led_scheme = 0;
8146 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
8147
8148 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8149 (BIT_6 | BIT_5 | BIT_4)) >> 4;
8150
8151 /* save HBA serial number */
8152 ha->serial0 = icb->port_name[5];
8153 ha->serial1 = icb->port_name[6];
8154 ha->serial2 = icb->port_name[7];
8155 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8156 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8157
ad950360 8158 icb->execution_throttle = cpu_to_le16(0xFFFF);
3a03eb79
AV
8159
8160 ha->retry_count = le16_to_cpu(nv->login_retry_count);
8161
8162 /* Set minimum login_timeout to 4 seconds. */
8163 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
8164 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
8165 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 8166 nv->login_timeout = cpu_to_le16(4);
3a03eb79 8167 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3a03eb79
AV
8168
8169 /* Set minimum RATOV to 100 tenths of a second. */
8170 ha->r_a_tov = 100;
8171
8172 ha->loop_reset_delay = nv->reset_delay;
8173
8174 /* Link Down Timeout = 0:
8175 *
7ec0effd 8176 * When Port Down timer expires we will start returning
3a03eb79
AV
8177 * I/O's to OS with "DID_NO_CONNECT".
8178 *
8179 * Link Down Timeout != 0:
8180 *
8181 * The driver waits for the link to come up after link down
8182 * before returning I/Os to OS with "DID_NO_CONNECT".
8183 */
8184 if (le16_to_cpu(nv->link_down_timeout) == 0) {
8185 ha->loop_down_abort_time =
8186 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
8187 } else {
8188 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
8189 ha->loop_down_abort_time =
8190 (LOOP_DOWN_TIME - ha->link_down_timeout);
8191 }
8192
8193 /* Need enough time to try and get the port back. */
8194 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8195 if (qlport_down_retry)
8196 ha->port_down_retry_count = qlport_down_retry;
8197
8198 /* Set login_retry_count */
8199 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8200 if (ha->port_down_retry_count ==
8201 le16_to_cpu(nv->port_down_retry_count) &&
8202 ha->port_down_retry_count > 3)
8203 ha->login_retry_count = ha->port_down_retry_count;
8204 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8205 ha->login_retry_count = ha->port_down_retry_count;
8206 if (ql2xloginretrycount)
8207 ha->login_retry_count = ql2xloginretrycount;
8208
6246b8a1 8209 /* if not running MSI-X we need handshaking on interrupts */
f73cb695 8210 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
ad950360 8211 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6246b8a1 8212
3a03eb79
AV
8213 /* Enable ZIO. */
8214 if (!vha->flags.init_done) {
8215 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8216 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
8217 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
8218 le16_to_cpu(icb->interrupt_delay_timer): 2;
8219 }
ad950360 8220 icb->firmware_options_2 &= cpu_to_le32(
3a03eb79
AV
8221 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
8222 vha->flags.process_response_queue = 0;
8223 if (ha->zio_mode != QLA_ZIO_DISABLED) {
8224 ha->zio_mode = QLA_ZIO_MODE_6;
8225
7c3df132 8226 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 8227 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
8228 ha->zio_mode,
8229 ha->zio_timer * 100);
3a03eb79
AV
8230
8231 icb->firmware_options_2 |= cpu_to_le32(
8232 (uint32_t)ha->zio_mode);
8233 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
8234 vha->flags.process_response_queue = 1;
8235 }
8236
41dc529a 8237 /* enable RIDA Format2 */
48acad09 8238 icb->firmware_options_3 |= BIT_0;
41dc529a 8239
8777e431
QT
8240 /* N2N: driver will initiate Login instead of FW */
8241 icb->firmware_options_3 |= BIT_8;
41dc529a 8242
edd05de1
DG
8243 if (IS_QLA27XX(ha)) {
8244 icb->firmware_options_3 |= BIT_8;
8245 ql_dbg(ql_log_info, vha, 0x0075,
8246 "Enabling direct connection.\n");
8247 }
8248
3a03eb79 8249 if (rval) {
7c3df132
SK
8250 ql_log(ql_log_warn, vha, 0x0076,
8251 "NVRAM configuration failed.\n");
3a03eb79
AV
8252 }
8253 return (rval);
8254}
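
/*
 * Illustrative sketch (not part of the driver): in both NVRAM paths the ZIO
 * mode occupies the low four bits of firmware_options_2 and the interrupt
 * delay timer is programmed in 100-microsecond units.  Packing those fields
 * on their own (endian conversion omitted for clarity):
 */
#include <stdint.h>

#define ZIO_MODE_MASK    0xFu    /* BIT_3..BIT_0 of firmware_options_2 */

static void program_zio(uint32_t *fw_options_2, uint16_t *delay_timer,
                        uint32_t zio_mode, uint16_t delay_100us)
{
    *fw_options_2 &= ~ZIO_MODE_MASK;    /* clear any stale mode bits */
    if (zio_mode) {
        *fw_options_2 |= zio_mode & ZIO_MODE_MASK;
        *delay_timer = delay_100us;     /* e.g. 2 means 200 us */
    }
}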
8255
a9083016
GM
8256int
8257qla82xx_restart_isp(scsi_qla_host_t *vha)
8258{
8259 int status, rval;
a9083016
GM
8260 struct qla_hw_data *ha = vha->hw;
8261 struct req_que *req = ha->req_q_map[0];
8262 struct rsp_que *rsp = ha->rsp_q_map[0];
8263 struct scsi_qla_host *vp;
feafb7b1 8264 unsigned long flags;
a9083016
GM
8265
8266 status = qla2x00_init_rings(vha);
8267 if (!status) {
8268 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8269 ha->flags.chip_reset_done = 1;
8270
8271 status = qla2x00_fw_ready(vha);
8272 if (!status) {
a9083016
GM
8273 /* Issue a marker after FW becomes ready. */
8274 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
a9083016 8275 vha->flags.online = 1;
7108b76e 8276 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
a9083016
GM
8277 }
8278
8279 /* if no cable then assume it's good */
8280 if ((vha->device_flags & DFLG_NO_CABLE))
8281 status = 0;
a9083016
GM
8282 }
8283
8284 if (!status) {
8285 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8286
8287 if (!atomic_read(&vha->loop_down_timer)) {
8288 /*
8289 * Issue marker command only when we are going
8290 * to start the I/O.
8291 */
8292 vha->marker_needed = 1;
8293 }
8294
a9083016
GM
8295 ha->isp_ops->enable_intrs(ha);
8296
8297 ha->isp_abort_cnt = 0;
8298 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
8299
53296788 8300 /* Update the firmware version */
3173167f 8301 status = qla82xx_check_md_needed(vha);
53296788 8302
a9083016
GM
8303 if (ha->fce) {
8304 ha->flags.fce_enabled = 1;
8305 memset(ha->fce, 0,
8306 fce_calc_size(ha->fce_bufs));
8307 rval = qla2x00_enable_fce_trace(vha,
8308 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
8309 &ha->fce_bufs);
8310 if (rval) {
cfb0919c 8311 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
8312 "Unable to reinitialize FCE (%d).\n",
8313 rval);
a9083016
GM
8314 ha->flags.fce_enabled = 0;
8315 }
8316 }
8317
8318 if (ha->eft) {
8319 memset(ha->eft, 0, EFT_SIZE);
8320 rval = qla2x00_enable_eft_trace(vha,
8321 ha->eft_dma, EFT_NUM_BUFFERS);
8322 if (rval) {
cfb0919c 8323 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
8324 "Unable to reinitialize EFT (%d).\n",
8325 rval);
a9083016
GM
8326 }
8327 }
a9083016
GM
8328 }
8329
8330 if (!status) {
cfb0919c 8331 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 8332 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
8333
8334 spin_lock_irqsave(&ha->vport_slock, flags);
8335 list_for_each_entry(vp, &ha->vp_list, list) {
8336 if (vp->vp_idx) {
8337 atomic_inc(&vp->vref_count);
8338 spin_unlock_irqrestore(&ha->vport_slock, flags);
8339
a9083016 8340 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
8341
8342 spin_lock_irqsave(&ha->vport_slock, flags);
8343 atomic_dec(&vp->vref_count);
8344 }
a9083016 8345 }
feafb7b1
AE
8346 spin_unlock_irqrestore(&ha->vport_slock, flags);
8347
a9083016 8348 } else {
cfb0919c 8349 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 8350 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
8351 }
8352
8353 return status;
8354}
8355
3a03eb79 8356void
ae97c91e 8357qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 8358{
ae97c91e
AV
8359 struct qla_hw_data *ha = vha->hw;
8360
f198cafa
HM
8361 /* Hold status IOCBs until ABTS response received. */
8362 if (ql2xfwholdabts)
8363 ha->fw_options[3] |= BIT_12;
8364
088d09d4
GM
8365 /* Set Retry FLOGI in case of P2P connection */
8366 if (ha->operating_mode == P2P) {
8367 ha->fw_options[2] |= BIT_3;
8368 ql_dbg(ql_dbg_disc, vha, 0x2103,
8369 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
8370 __func__, ha->fw_options[2]);
8371 }
8372
41dc529a
QT
8373 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
8374 if (ql2xmvasynctoatio) {
8375 if (qla_tgt_mode_enabled(vha) ||
8376 qla_dual_mode_enabled(vha))
8377 ha->fw_options[2] |= BIT_11;
8378 else
8379 ha->fw_options[2] &= ~BIT_11;
8380 }
8381
f7e761f5 8382 if (qla_tgt_mode_enabled(vha) ||
2da52737
QT
8383 qla_dual_mode_enabled(vha)) {
8384 /* FW auto send SCSI status during */
8385 ha->fw_options[1] |= BIT_8;
8386 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
8387
8388 /* FW perform Exchange validation */
f7e761f5 8389 ha->fw_options[2] |= BIT_4;
2da52737
QT
8390 } else {
8391 ha->fw_options[1] &= ~BIT_8;
8392 ha->fw_options[10] &= 0x00ff;
8393
f7e761f5 8394 ha->fw_options[2] &= ~BIT_4;
2da52737 8395 }
f7e761f5 8396
41dc529a
QT
8397 if (ql2xetsenable) {
8398 /* Enable ETS Burst. */
8399 memset(ha->fw_options, 0, sizeof(ha->fw_options));
8400 ha->fw_options[2] |= BIT_9;
8401 }
8402
83548fe2
QT
8403 ql_dbg(ql_dbg_init, vha, 0x00e9,
8404 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
8405 __func__, ha->fw_options[1], ha->fw_options[2],
8406 ha->fw_options[3], vha->host->active_mode);
ae97c91e 8407
ae97c91e 8408 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 8409}
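
/*
 * Illustrative sketch (not part of the driver): the routine above treats
 * ha->fw_options[] as an array of 16-bit feature words, setting or clearing
 * individual bits (and, for word 10, an 8-bit status value in the high
 * byte) depending on operating mode and module parameters.  The underlying
 * operations are plain bit assignment:
 */
#include <stdint.h>

static void set_option_bit(uint16_t *word, uint16_t bit, int enable)
{
    if (enable)
        *word |= bit;
    else
        *word &= ~bit;
}

static void set_option_high_byte(uint16_t *word, uint8_t val)
{
    *word = (uint16_t)((*word & 0x00ff) | ((uint16_t)val << 8));
}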
09ff701a
SR
8410
8411/*
8412 * qla24xx_get_fcp_prio
8413 * Gets the fcp cmd priority value for the logged in port.
8414 * Looks for a match of the port descriptors within
8415 * each of the fcp prio config entries. If a match is found,
8416 * the tag (priority) value is returned.
8417 *
8418 * Input:
21090cbe 8419 * vha = scsi host structure pointer.
09ff701a
SR
8420 * fcport = port structure pointer.
8421 *
8422 * Return:
6c452a45 8423 * non-zero (if found)
f28a0a96 8424 * -1 (if not found)
09ff701a
SR
8425 *
8426 * Context:
8427 * Kernel context
8428 */
f28a0a96 8429static int
09ff701a
SR
8430qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8431{
8432 int i, entries;
8433 uint8_t pid_match, wwn_match;
f28a0a96 8434 int priority;
09ff701a
SR
8435 uint32_t pid1, pid2;
8436 uint64_t wwn1, wwn2;
8437 struct qla_fcp_prio_entry *pri_entry;
8438 struct qla_hw_data *ha = vha->hw;
8439
8440 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 8441 return -1;
09ff701a 8442
f28a0a96 8443 priority = -1;
09ff701a
SR
8444 entries = ha->fcp_prio_cfg->num_entries;
8445 pri_entry = &ha->fcp_prio_cfg->entry[0];
8446
8447 for (i = 0; i < entries; i++) {
8448 pid_match = wwn_match = 0;
8449
8450 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
8451 pri_entry++;
8452 continue;
8453 }
8454
8455 /* check source pid for a match */
8456 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
8457 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
8458 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
8459 if (pid1 == INVALID_PORT_ID)
8460 pid_match++;
8461 else if (pid1 == pid2)
8462 pid_match++;
8463 }
8464
8465 /* check destination pid for a match */
8466 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
8467 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
8468 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
8469 if (pid1 == INVALID_PORT_ID)
8470 pid_match++;
8471 else if (pid1 == pid2)
8472 pid_match++;
8473 }
8474
8475 /* check source WWN for a match */
8476 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
8477 wwn1 = wwn_to_u64(vha->port_name);
8478 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8479 if (wwn2 == (uint64_t)-1)
8480 wwn_match++;
8481 else if (wwn1 == wwn2)
8482 wwn_match++;
8483 }
8484
8485 /* check destination WWN for a match */
8486 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8487 wwn1 = wwn_to_u64(fcport->port_name);
8488 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8489 if (wwn2 == (uint64_t)-1)
8490 wwn_match++;
8491 else if (wwn1 == wwn2)
8492 wwn_match++;
8493 }
8494
8495 if (pid_match == 2 || wwn_match == 2) {
8496 /* Found a matching entry */
8497 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8498 priority = pri_entry->tag;
8499 break;
8500 }
8501
8502 pri_entry++;
8503 }
8504
8505 return priority;
8506}
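
/*
 * Illustrative sketch (not part of the driver): an fcp_prio entry applies
 * when BOTH of its port-id fields match (each either exact or wildcard) or
 * BOTH of its WWN fields match; the real code additionally gates each field
 * on a per-entry VALID flag, which is omitted here.  The per-field rule,
 * factored out with hypothetical wildcard constants mirroring the ones used
 * above:
 */
#include <stdint.h>

#define PID_WILDCARD    0x00ffffffu       /* stands in for INVALID_PORT_ID */
#define WWN_WILDCARD    (~(uint64_t)0)

static int pid_matches(uint32_t entry_pid, uint32_t actual_pid)
{
    entry_pid &= PID_WILDCARD;
    return entry_pid == PID_WILDCARD ||
           entry_pid == (actual_pid & PID_WILDCARD);
}

static int wwn_matches(uint64_t entry_wwn, uint64_t actual_wwn)
{
    return entry_wwn == WWN_WILDCARD || entry_wwn == actual_wwn;
}

/* Entry applies if both PIDs match or both WWNs match. */
static int entry_applies(uint32_t src_pid, uint32_t dst_pid,
                         uint64_t src_wwn, uint64_t dst_wwn,
                         uint32_t local_pid, uint32_t remote_pid,
                         uint64_t local_wwn, uint64_t remote_wwn)
{
    return (pid_matches(src_pid, local_pid) &&
            pid_matches(dst_pid, remote_pid)) ||
           (wwn_matches(src_wwn, local_wwn) &&
            wwn_matches(dst_wwn, remote_wwn));
}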
8507
8508/*
8509 * qla24xx_update_fcport_fcp_prio
8510 * Activates fcp priority for the logged in fc port
8511 *
8512 * Input:
21090cbe 8513 * vha = scsi host structure pointer.
09ff701a
SR
8514 * fcp = port structure pointer.
8515 *
8516 * Return:
8517 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8518 *
8519 * Context:
8520 * Kernel context.
8521 */
8522int
21090cbe 8523qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
8524{
8525 int ret;
f28a0a96 8526 int priority;
09ff701a
SR
8527 uint16_t mb[5];
8528
21090cbe
MI
8529 if (fcport->port_type != FCT_TARGET ||
8530 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
8531 return QLA_FUNCTION_FAILED;
8532
21090cbe 8533 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
8534 if (priority < 0)
8535 return QLA_FUNCTION_FAILED;
8536
7ec0effd 8537 if (IS_P3P_TYPE(vha->hw)) {
a00f6296
SK
8538 fcport->fcp_prio = priority & 0xf;
8539 return QLA_SUCCESS;
8540 }
8541
21090cbe 8542 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
8543 if (ret == QLA_SUCCESS) {
8544 if (fcport->fcp_prio != priority)
8545 ql_dbg(ql_dbg_user, vha, 0x709e,
8546 "Updated FCP_CMND priority - value=%d loop_id=%d "
8547 "port_id=%02x%02x%02x.\n", priority,
8548 fcport->loop_id, fcport->d_id.b.domain,
8549 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 8550 fcport->fcp_prio = priority & 0xf;
cfb0919c 8551 } else
7c3df132 8552 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
8553 "Unable to update FCP_CMND priority - ret=0x%x for "
8554 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8555 fcport->d_id.b.domain, fcport->d_id.b.area,
8556 fcport->d_id.b.al_pa);
09ff701a
SR
8557 return ret;
8558}
8559
8560/*
8561 * qla24xx_update_all_fcp_prio
8562 * Activates fcp priority for all the logged in ports
8563 *
8564 * Input:
8565 * ha = adapter block pointer.
8566 *
8567 * Return:
8568 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8569 *
8570 * Context:
8571 * Kernel context.
8572 */
8573int
8574qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8575{
8576 int ret;
8577 fc_port_t *fcport;
8578
8579 ret = QLA_FUNCTION_FAILED;
8580 /* We need to set priority for all logged in ports */
8581 list_for_each_entry(fcport, &vha->vp_fcports, list)
8582 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8583
8584 return ret;
8585}
d7459527 8586
82de802a
QT
8587struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8588 int vp_idx, bool startqp)
d7459527
MH
8589{
8590 int rsp_id = 0;
8591 int req_id = 0;
8592 int i;
8593 struct qla_hw_data *ha = vha->hw;
8594 uint16_t qpair_id = 0;
8595 struct qla_qpair *qpair = NULL;
8596 struct qla_msix_entry *msix;
8597
8598 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8599 ql_log(ql_log_warn, vha, 0x00181,
8600 "FW/Driver is not multi-queue capable.\n");
8601 return NULL;
8602 }
8603
c38d1baf 8604 if (ql2xmqsupport || ql2xnvmeenable) {
d7459527
MH
8605 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8606 if (qpair == NULL) {
8607 ql_log(ql_log_warn, vha, 0x0182,
8608 "Failed to allocate memory for queue pair.\n");
8609 return NULL;
8610 }
8611 memset(qpair, 0, sizeof(struct qla_qpair));
8612
8613 qpair->hw = vha->hw;
25ff6af1 8614 qpair->vha = vha;
82de802a
QT
8615 qpair->qp_lock_ptr = &qpair->qp_lock;
8616 spin_lock_init(&qpair->qp_lock);
af7bb382 8617 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
d7459527
MH
8618
8619 /* Assign an available queue pair id */
8620 mutex_lock(&ha->mq_lock);
8621 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
b95b9452 8622 if (ha->num_qpairs >= ha->max_qpairs) {
d7459527
MH
8623 mutex_unlock(&ha->mq_lock);
8624 ql_log(ql_log_warn, vha, 0x0183,
8625 "No resources to create additional q pair.\n");
8626 goto fail_qid_map;
8627 }
b95b9452 8628 ha->num_qpairs++;
d7459527
MH
8629 set_bit(qpair_id, ha->qpair_qid_map);
8630 ha->queue_pair_map[qpair_id] = qpair;
8631 qpair->id = qpair_id;
8632 qpair->vp_idx = vp_idx;
e6373f33 8633 qpair->fw_started = ha->flags.fw_started;
e326d22a 8634 INIT_LIST_HEAD(&qpair->hints_list);
7c3f8fd1
QT
8635 qpair->chip_reset = ha->base_qpair->chip_reset;
8636 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8637 qpair->enable_explicit_conf =
8638 ha->base_qpair->enable_explicit_conf;
d7459527
MH
8639
8640 for (i = 0; i < ha->msix_count; i++) {
093df737 8641 msix = &ha->msix_entries[i];
d7459527
MH
8642 if (msix->in_use)
8643 continue;
8644 qpair->msix = msix;
83548fe2 8645 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
d7459527
MH
8646 "Vector %x selected for qpair\n", msix->vector);
8647 break;
8648 }
8649 if (!qpair->msix) {
8650 ql_log(ql_log_warn, vha, 0x0184,
8651 "Out of MSI-X vectors!.\n");
8652 goto fail_msix;
8653 }
8654
8655 qpair->msix->in_use = 1;
8656 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8abfa9e2
QT
8657 qpair->pdev = ha->pdev;
8658 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
8659 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
d7459527
MH
8660
8661 mutex_unlock(&ha->mq_lock);
8662
8663 /* Create response queue first */
82de802a 8664 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
d7459527
MH
8665 if (!rsp_id) {
8666 ql_log(ql_log_warn, vha, 0x0185,
8667 "Failed to create response queue.\n");
8668 goto fail_rsp;
8669 }
8670
8671 qpair->rsp = ha->rsp_q_map[rsp_id];
8672
8673 /* Create request queue */
82de802a
QT
8674 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8675 startqp);
d7459527
MH
8676 if (!req_id) {
8677 ql_log(ql_log_warn, vha, 0x0186,
8678 "Failed to create request queue.\n");
8679 goto fail_req;
8680 }
8681
8682 qpair->req = ha->req_q_map[req_id];
8683 qpair->rsp->req = qpair->req;
82de802a 8684 qpair->rsp->qpair = qpair;
e326d22a
QT
8685 /* init qpair to this cpu. Will adjust at run time. */
8686 qla_cpu_update(qpair, smp_processor_id());
d7459527
MH
8687
8688 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
8689 if (ha->fw_attributes & BIT_4)
8690 qpair->difdix_supported = 1;
8691 }
8692
8693 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
8694 if (!qpair->srb_mempool) {
83548fe2 8695 ql_log(ql_log_warn, vha, 0xd036,
d7459527
MH
8696 "Failed to create srb mempool for qpair %d\n",
8697 qpair->id);
8698 goto fail_mempool;
8699 }
8700
8701 /* Mark as online */
8702 qpair->online = 1;
8703
8704 if (!vha->flags.qpairs_available)
8705 vha->flags.qpairs_available = 1;
8706
8707 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
8708 "Request/Response queue pair created, id %d\n",
8709 qpair->id);
8710 ql_dbg(ql_dbg_init, vha, 0x0187,
8711 "Request/Response queue pair created, id %d\n",
8712 qpair->id);
8713 }
8714 return qpair;
8715
8716fail_mempool:
8717fail_req:
8718 qla25xx_delete_rsp_que(vha, qpair->rsp);
8719fail_rsp:
8720 mutex_lock(&ha->mq_lock);
8721 qpair->msix->in_use = 0;
8722 list_del(&qpair->qp_list_elem);
8723 if (list_empty(&vha->qp_list))
8724 vha->flags.qpairs_available = 0;
8725fail_msix:
8726 ha->queue_pair_map[qpair_id] = NULL;
8727 clear_bit(qpair_id, ha->qpair_qid_map);
b95b9452 8728 ha->num_qpairs--;
d7459527
MH
8729 mutex_unlock(&ha->mq_lock);
8730fail_qid_map:
8731 kfree(qpair);
8732 return NULL;
8733}
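
/*
 * Illustrative sketch (not part of the driver): queue-pair ids above come
 * from a bitmap; the driver finds the first clear bit under the mutex,
 * fails if the configured maximum is reached, then marks the bit used.
 * The same idea with a plain 32-entry word standing in for
 * ha->qpair_qid_map (the real map is a proper bitmap of max_qpairs bits):
 */
#include <stdint.h>

static int alloc_qpair_id(uint32_t *qid_map, unsigned int max_qpairs,
                          unsigned int num_in_use)
{
    unsigned int id;

    if (num_in_use >= max_qpairs || max_qpairs > 32)
        return -1;    /* no resources for another queue pair */

    for (id = 0; id < max_qpairs; id++)
        if (!(*qid_map & (1u << id))) {
            *qid_map |= 1u << id;    /* claim the id */
            return (int)id;
        }

    return -1;
}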
8734
8735int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
8736{
d65237c7 8737 int ret = QLA_FUNCTION_FAILED;
d7459527
MH
8738 struct qla_hw_data *ha = qpair->hw;
8739
8740 qpair->delete_in_progress = 1;
8741 while (atomic_read(&qpair->ref_count))
8742 msleep(500);
8743
8744 ret = qla25xx_delete_req_que(vha, qpair->req);
8745 if (ret != QLA_SUCCESS)
8746 goto fail;
7867b98d 8747
d7459527
MH
8748 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
8749 if (ret != QLA_SUCCESS)
8750 goto fail;
8751
8752 mutex_lock(&ha->mq_lock);
8753 ha->queue_pair_map[qpair->id] = NULL;
8754 clear_bit(qpair->id, ha->qpair_qid_map);
b95b9452 8755 ha->num_qpairs--;
d7459527 8756 list_del(&qpair->qp_list_elem);
d65237c7 8757 if (list_empty(&vha->qp_list)) {
d7459527 8758 vha->flags.qpairs_available = 0;
d65237c7
SC
8759 vha->flags.qpairs_req_created = 0;
8760 vha->flags.qpairs_rsp_created = 0;
8761 }
d7459527
MH
8762 mempool_destroy(qpair->srb_mempool);
8763 kfree(qpair);
8764 mutex_unlock(&ha->mq_lock);
8765
8766 return QLA_SUCCESS;
8767fail:
8768 return ret;
8769}